diff --git a/DEPS b/DEPS index bc5aac6..21d6753 100644 --- a/DEPS +++ b/DEPS
@@ -44,7 +44,7 @@ # Three lines of non-changing comments so that # the commit queue can handle CLs rolling V8 # and whatever else without interference from each other. - 'v8_revision': '8f21326365dc45e3c2f8b13feaa967e68466f2d6', + 'v8_revision': '6dc8ebe23b3ff39962a3ecb9d9411b47c470483c', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling swarming_client # and whatever else without interference from each other. @@ -96,7 +96,7 @@ # Three lines of non-changing comments so that # the commit queue can handle CLs rolling catapult # and whatever else without interference from each other. - 'catapult_revision': '8cbbd7f6c6207e388918855a3536c5b7f30bff29', + 'catapult_revision': '1dd41870427d5872bbb28f501efe8bfc2e01ec12', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling libFuzzer # and whatever else without interference from each other.
diff --git a/apps/saved_files_service.cc b/apps/saved_files_service.cc index 1c56f0644..9d61e8d 100644 --- a/apps/saved_files_service.cc +++ b/apps/saved_files_service.cc
@@ -107,7 +107,7 @@ // Clears all SavedFileEntry for the app from ExtensionPrefs. void ClearSavedFileEntries(ExtensionPrefs* prefs, const std::string& extension_id) { - prefs->UpdateExtensionPref(extension_id, kFileEntries, NULL); + prefs->UpdateExtensionPref(extension_id, kFileEntries, nullptr); } // Returns all SavedFileEntries for the app.
diff --git a/ash/mus/window_manager_unittest.cc b/ash/mus/window_manager_unittest.cc index 4fb7e82..84f23a4 100644 --- a/ash/mus/window_manager_unittest.cc +++ b/ash/mus/window_manager_unittest.cc
@@ -99,7 +99,8 @@ properties[ui::mojom::WindowManager::kWindowType_InitProperty] = mojo::ConvertTo<std::vector<uint8_t>>( static_cast<int32_t>(ui::mojom::WindowType::WINDOW)); - aura::WindowTreeHostMus window_tree_host_mus(&client, &properties); + aura::WindowTreeHostMus window_tree_host_mus(&client, cc::FrameSinkId(), + &properties); window_tree_host_mus.InitHost(); aura::Window* child_window = new aura::Window(nullptr); child_window->Init(ui::LAYER_NOT_DRAWN);
diff --git a/chrome/app/generated_resources.grd b/chrome/app/generated_resources.grd index 8f8689b..1f27238 100644 --- a/chrome/app/generated_resources.grd +++ b/chrome/app/generated_resources.grd
@@ -12777,12 +12777,6 @@ Dark Theme </message> - <message name="IDS_FLAGS_NEW_PROFILE_MANAGEMENT_NAME" desc="Title for the flag for the new profile management." translateable="false"> - New profile management system - </message> - <message name="IDS_FLAGS_NEW_PROFILE_MANAGEMENT_DESCRIPTION" desc="Description for the flag for the new profile management." translateable="false"> - Enables the new profile management system, including profile lock and the new avatar menu UI. - </message> <message name="IDS_FLAGS_ACCOUNT_CONSISTENCY_NAME" desc="Title for the flag for account consistency between browser and cookie jar." translateable="false"> Identity consistency between browser and cookie jar </message>
diff --git a/chrome/browser/BUILD.gn b/chrome/browser/BUILD.gn index 99b8407..637c004 100644 --- a/chrome/browser/BUILD.gn +++ b/chrome/browser/BUILD.gn
@@ -4525,8 +4525,6 @@ visibility = [ "//chrome/test:test_support_ui" ] sources = [ - "password_manager/password_manager_test_base.cc", - "password_manager/password_manager_test_base.h", "signin/token_revoker_test_utils.cc", "signin/token_revoker_test_utils.h", "ui/webui/signin/login_ui_test_utils.cc", @@ -4535,6 +4533,8 @@ if (!is_android) { sources += [ + "password_manager/password_manager_test_base.cc", + "password_manager/password_manager_test_base.h", "ui/webui/web_ui_test_handler.cc", "ui/webui/web_ui_test_handler.h", ]
diff --git a/chrome/browser/about_flags.cc b/chrome/browser/about_flags.cc index 72549bd..d69a100 100644 --- a/chrome/browser/about_flags.cc +++ b/chrome/browser/about_flags.cc
@@ -1167,8 +1167,7 @@ IDS_FLAGS_OVERSCROLL_HISTORY_NAVIGATION_NAME, IDS_FLAGS_OVERSCROLL_HISTORY_NAVIGATION_DESCRIPTION, kOsAura, MULTI_VALUE_TYPE(kOverscrollHistoryNavigationChoices)}, - {"overscroll-start-threshold", - IDS_FLAGS_OVERSCROLL_START_THRESHOLD_NAME, + {"overscroll-start-threshold", IDS_FLAGS_OVERSCROLL_START_THRESHOLD_NAME, IDS_FLAGS_OVERSCROLL_START_THRESHOLD_DESCRIPTION, kOsAura, MULTI_VALUE_TYPE(kOverscrollStartThresholdChoices)}, #endif // USE_AURA @@ -1339,14 +1338,9 @@ {"enable-webgl-draft-extensions", IDS_FLAGS_WEBGL_DRAFT_EXTENSIONS_NAME, IDS_FLAGS_WEBGL_DRAFT_EXTENSIONS_DESCRIPTION, kOsAll, SINGLE_VALUE_TYPE(switches::kEnableWebGLDraftExtensions)}, - {"enable-new-profile-management", IDS_FLAGS_NEW_PROFILE_MANAGEMENT_NAME, - IDS_FLAGS_NEW_PROFILE_MANAGEMENT_DESCRIPTION, kOsAll, - ENABLE_DISABLE_VALUE_TYPE(switches::kEnableNewProfileManagement, - switches::kDisableNewProfileManagement)}, {"enable-account-consistency", IDS_FLAGS_ACCOUNT_CONSISTENCY_NAME, IDS_FLAGS_ACCOUNT_CONSISTENCY_DESCRIPTION, kOsAll, - ENABLE_DISABLE_VALUE_TYPE(switches::kEnableAccountConsistency, - switches::kDisableAccountConsistency)}, + SINGLE_VALUE_TYPE(switches::kEnableAccountConsistency)}, {"enable-password-separated-signin-flow", IDS_FLAGS_ENABLE_PASSWORD_SEPARATED_SIGNIN_FLOW_NAME, IDS_FLAGS_ENABLE_PASSWORD_SEPARATED_SIGNIN_FLOW_DESCRIPTION,
diff --git a/chrome/browser/android/logo_bridge.cc b/chrome/browser/android/logo_bridge.cc index cd6284ac..82de78a 100644 --- a/chrome/browser/android/logo_bridge.cc +++ b/chrome/browser/android/logo_bridge.cc
@@ -299,6 +299,9 @@ return; } + UMA_HISTOGRAM_BOOLEAN("NewTabPage.LogoImageDownloaded", + metadata.from_http_cache); + ScopedJavaLocalRef<jobject> j_logo = MakeJavaLogo( env, image.ToSkBitmap(), on_click_url, alt_text, animated_image_url); Java_LogoObserver_onLogoAvailable(env, j_logo_observer_, j_logo,
diff --git a/chrome/browser/browser_resources.grd b/chrome/browser/browser_resources.grd index 736341f..23f1bfb 100644 --- a/chrome/browser/browser_resources.grd +++ b/chrome/browser/browser_resources.grd
@@ -262,7 +262,6 @@ <include name="IDR_MD_BOOKMARKS_APP_HTML" file="resources\md_bookmarks\app.html" type="BINDATA" /> <include name="IDR_MD_BOOKMARKS_APP_JS" file="resources\md_bookmarks\app.js" type="BINDATA" /> <include name="IDR_MD_BOOKMARKS_BOOKMARKS_HTML" file="resources\md_bookmarks\bookmarks.html" type="BINDATA" /> - <include name="IDR_MD_BOOKMARKS_BOOKMARKS_STORE_JS" file="resources\md_bookmarks\bookmarks_store.js" type="BINDATA" /> <include name="IDR_MD_BOOKMARKS_EDIT_DIALOG_HTML" file="resources\md_bookmarks\edit_dialog.html" type="BINDATA" /> <include name="IDR_MD_BOOKMARKS_EDIT_DIALOG_JS" file="resources\md_bookmarks\edit_dialog.js" type="BINDATA" /> <include name="IDR_MD_BOOKMARKS_FOLDER_NODE_HTML" file="resources\md_bookmarks\folder_node.html" type="BINDATA" />
diff --git a/chrome/browser/extensions/api/commands/command_service.cc b/chrome/browser/extensions/api/commands/command_service.cc index 5b43144..772a939 100644 --- a/chrome/browser/extensions/api/commands/command_service.cc +++ b/chrome/browser/extensions/api/commands/command_service.cc
@@ -8,6 +8,7 @@ #include <vector> #include "base/lazy_instance.h" +#include "base/memory/ptr_util.h" #include "base/strings/string_split.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" @@ -85,7 +86,7 @@ void SetInitialBindingsHaveBeenAssigned( ExtensionPrefs* prefs, const std::string& extension_id) { prefs->UpdateExtensionPref(extension_id, kInitialBindingsHaveBeenAssigned, - new base::Value(true)); + base::MakeUnique<base::Value>(true)); } bool InitialBindingsHaveBeenAssigned( @@ -114,9 +115,8 @@ suggested_key_prefs = std::move(new_prefs); } - extension_prefs->UpdateExtensionPref(extension_id, - kCommands, - suggested_key_prefs.release()); + extension_prefs->UpdateExtensionPref(extension_id, kCommands, + std::move(suggested_key_prefs)); } } // namespace @@ -719,9 +719,8 @@ } } - extension_prefs->UpdateExtensionPref(extension->id(), - kCommands, - suggested_key_prefs.release()); + extension_prefs->UpdateExtensionPref(extension->id(), kCommands, + std::move(suggested_key_prefs)); } }
diff --git a/chrome/browser/extensions/api/content_settings/content_settings_store.cc b/chrome/browser/extensions/api/content_settings/content_settings_store.cc index 0b1c48d..8bed231 100644 --- a/chrome/browser/extensions/api/content_settings/content_settings_store.cc +++ b/chrome/browser/extensions/api/content_settings/content_settings_store.cc
@@ -245,7 +245,7 @@ } } -base::ListValue* ContentSettingsStore::GetSettingsForExtension( +std::unique_ptr<base::ListValue> ContentSettingsStore::GetSettingsForExtension( const std::string& extension_id, ExtensionPrefsScope scope) const { base::AutoLock lock(lock_); @@ -253,7 +253,7 @@ if (!map) return nullptr; - base::ListValue* settings = new base::ListValue(); + auto settings = base::MakeUnique<base::ListValue>(); for (const auto& it : *map) { const auto& key = it.first; std::unique_ptr<RuleIterator> rule_iterator(
diff --git a/chrome/browser/extensions/api/content_settings/content_settings_store.h b/chrome/browser/extensions/api/content_settings/content_settings_store.h index 0567613..3a9033a4 100644 --- a/chrome/browser/extensions/api/content_settings/content_settings_store.h +++ b/chrome/browser/extensions/api/content_settings/content_settings_store.h
@@ -80,8 +80,9 @@ // Serializes all content settings set by the extension with ID |extension_id| // and returns them as a ListValue. The caller takes ownership of the returned // value. - base::ListValue* GetSettingsForExtension(const std::string& extension_id, - ExtensionPrefsScope scope) const; + std::unique_ptr<base::ListValue> GetSettingsForExtension( + const std::string& extension_id, + ExtensionPrefsScope scope) const; // Deserializes content settings rules from |list| and applies them as set by // the extension with ID |extension_id|.
diff --git a/chrome/browser/extensions/api/declarative/rules_registry_with_cache_unittest.cc b/chrome/browser/extensions/api/declarative/rules_registry_with_cache_unittest.cc index ec4d41a..d87d88f 100644 --- a/chrome/browser/extensions/api/declarative/rules_registry_with_cache_unittest.cc +++ b/chrome/browser/extensions/api/declarative/rules_registry_with_cache_unittest.cc
@@ -9,6 +9,7 @@ // RulesRegistryWithCache. #include "base/command_line.h" +#include "base/memory/ptr_util.h" #include "base/run_loop.h" #include "chrome/browser/extensions/extension_service.h" #include "chrome/browser/extensions/test_extension_environment.h" @@ -242,11 +243,11 @@ EXPECT_TRUE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); extension_prefs->UpdateExtensionPref(extension1_->id(), rules_stored_key, - new base::Value(false)); + base::MakeUnique<base::Value>(false)); EXPECT_FALSE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); extension_prefs->UpdateExtensionPref(extension1_->id(), rules_stored_key, - new base::Value(true)); + base::MakeUnique<base::Value>(true)); EXPECT_TRUE(cache_delegate->GetDeclarativeRulesStored(extension1_->id())); // 2. Test writing behavior. @@ -325,7 +326,7 @@ // Update the flag for the first registry. extension_prefs->UpdateExtensionPref(extension1_->id(), rules_stored_key1, - new base::Value(false)); + base::MakeUnique<base::Value>(false)); EXPECT_FALSE(cache_delegate1->GetDeclarativeRulesStored(extension1_->id())); EXPECT_TRUE(cache_delegate2->GetDeclarativeRulesStored(extension1_->id())); }
diff --git a/chrome/browser/extensions/api/device_permissions_manager_unittest.cc b/chrome/browser/extensions/api/device_permissions_manager_unittest.cc index 6ff4e53..4ee780d 100644 --- a/chrome/browser/extensions/api/device_permissions_manager_unittest.cc +++ b/chrome/browser/extensions/api/device_permissions_manager_unittest.cc
@@ -301,7 +301,7 @@ " }" "]"); env_->GetExtensionPrefs()->UpdateExtensionPref(extension_->id(), "devices", - prefs_value.release()); + std::move(prefs_value)); DevicePermissionsManager* manager = DevicePermissionsManager::Get(env_->profile());
diff --git a/chrome/browser/extensions/api/extension_action/extension_action_api.cc b/chrome/browser/extensions/api/extension_action/extension_action_api.cc index 9ced1ff..36d7be9 100644 --- a/chrome/browser/extensions/api/extension_action/extension_action_api.cc +++ b/chrome/browser/extensions/api/extension_action/extension_action_api.cc
@@ -163,8 +163,9 @@ if (GetBrowserActionVisibility(extension_id) == visible) return; - GetExtensionPrefs()->UpdateExtensionPref(extension_id, kBrowserActionVisible, - new base::Value(visible)); + GetExtensionPrefs()->UpdateExtensionPref( + extension_id, kBrowserActionVisible, + base::MakeUnique<base::Value>(visible)); for (auto& observer : observers_) observer.OnExtensionActionVisibilityChanged(extension_id, visible); }
diff --git a/chrome/browser/extensions/api/file_system/file_system_api.cc b/chrome/browser/extensions/api/file_system/file_system_api.cc index 0563e0f..ca8fe51 100644 --- a/chrome/browser/extensions/api/file_system/file_system_api.cc +++ b/chrome/browser/extensions/api/file_system/file_system_api.cc
@@ -293,9 +293,8 @@ void SetLastChooseEntryDirectory(ExtensionPrefs* prefs, const std::string& extension_id, const base::FilePath& path) { - prefs->UpdateExtensionPref(extension_id, - kLastChooseEntryDirectory, - base::CreateFilePathValue(path)); + prefs->UpdateExtensionPref(extension_id, kLastChooseEntryDirectory, + base::WrapUnique(base::CreateFilePathValue(path))); } #if defined(OS_CHROMEOS)
diff --git a/chrome/browser/extensions/api/input_ime/input_ime_api_nonchromeos.cc b/chrome/browser/extensions/api/input_ime/input_ime_api_nonchromeos.cc index f7663ed..ebb5cb9 100644 --- a/chrome/browser/extensions/api/input_ime/input_ime_api_nonchromeos.cc +++ b/chrome/browser/extensions/api/input_ime/input_ime_api_nonchromeos.cc
@@ -148,7 +148,8 @@ // input.ime.activate API has been never called since loaded. Profile* profile = Profile::FromBrowserContext(browser_context); ExtensionPrefs::Get(profile)->UpdateExtensionPref( - extension->id(), kPrefNeverActivatedSinceLoaded, new base::Value(true)); + extension->id(), kPrefNeverActivatedSinceLoaded, + base::MakeUnique<base::Value>(true)); } void InputImeAPI::OnExtensionUnloaded(content::BrowserContext* browser_context, @@ -160,7 +161,7 @@ // Records the extension is not the last active IME engine. ExtensionPrefs::Get(Profile::FromBrowserContext(browser_context)) ->UpdateExtensionPref(extension->id(), kPrefLastActiveEngine, - new base::Value(false)); + base::MakeUnique<base::Value>(false)); event_router->DeleteInputMethodEngine(extension->id()); } } @@ -188,7 +189,7 @@ // Records the extension is the last active IME engine. ExtensionPrefs::Get(GetProfile()) ->UpdateExtensionPref(extension_id, kPrefLastActiveEngine, - new base::Value(true)); + base::MakeUnique<base::Value>(true)); if (active_engine_) { if (active_engine_->GetExtensionId() == extension_id) { active_engine_->Enable(std::string()); @@ -198,7 +199,8 @@ // Records the extension is not the last active IME engine. ExtensionPrefs::Get(GetProfile()) ->UpdateExtensionPref(active_engine_->GetExtensionId(), - kPrefLastActiveEngine, new base::Value(false)); + kPrefLastActiveEngine, + base::MakeUnique<base::Value>(false)); DeleteInputMethodEngine(active_engine_->GetExtensionId()); } @@ -249,12 +251,14 @@ // chrome. No need for user gesture checking. event_router->SetActiveEngine(extension_id()); ExtensionPrefs::Get(profile)->UpdateExtensionPref( - extension_id(), kPrefNeverActivatedSinceLoaded, new base::Value(false)); + extension_id(), kPrefNeverActivatedSinceLoaded, + base::MakeUnique<base::Value>(false)); return RespondNow(NoArguments()); } // The API has already been called at least once. 
ExtensionPrefs::Get(profile)->UpdateExtensionPref( - extension_id(), kPrefNeverActivatedSinceLoaded, new base::Value(false)); + extension_id(), kPrefNeverActivatedSinceLoaded, + base::MakeUnique<base::Value>(false)); // Otherwise, this API is only allowed to be called from a user action. if (!user_gesture()) @@ -315,7 +319,8 @@ // Updates the extension preference if user checks the 'Never show this // again' check box. So we can activate the extension directly next time. ExtensionPrefs::Get(profile)->UpdateExtensionPref( - extension_id(), kPrefWarningBubbleNeverShow, new base::Value(true)); + extension_id(), kPrefWarningBubbleNeverShow, + base::MakeUnique<base::Value>(true)); } Respond(NoArguments());
diff --git a/chrome/browser/extensions/api/module/module.cc b/chrome/browser/extensions/api/module/module.cc index 2cec950..4319d28 100644 --- a/chrome/browser/extensions/api/module/module.cc +++ b/chrome/browser/extensions/api/module/module.cc
@@ -47,7 +47,7 @@ ExtensionPrefs::Get(browser_context()) ->UpdateExtensionPref(extension_id(), extension::kUpdateURLData, - new base::Value(data)); + base::MakeUnique<base::Value>(data)); return RespondNow(NoArguments()); }
diff --git a/chrome/browser/extensions/api/omnibox/omnibox_api.cc b/chrome/browser/extensions/api/omnibox/omnibox_api.cc index dab6aa3b..b37d22d9 100644 --- a/chrome/browser/extensions/api/omnibox/omnibox_api.cc +++ b/chrome/browser/extensions/api/omnibox/omnibox_api.cc
@@ -75,9 +75,8 @@ // Add the content field so that the dictionary can be used to populate an // omnibox::SuggestResult. dict->SetWithoutPathExpansion(kSuggestionContent, new base::Value("")); - prefs->UpdateExtensionPref(extension_id, - kOmniboxDefaultSuggestion, - dict.release()); + prefs->UpdateExtensionPref(extension_id, kOmniboxDefaultSuggestion, + std::move(dict)); return true; }
diff --git a/chrome/browser/extensions/api/signed_in_devices/signed_in_devices_api.cc b/chrome/browser/extensions/api/signed_in_devices/signed_in_devices_api.cc index 096316a5..93d9ac9 100644 --- a/chrome/browser/extensions/api/signed_in_devices/signed_in_devices_api.cc +++ b/chrome/browser/extensions/api/signed_in_devices/signed_in_devices_api.cc
@@ -41,10 +41,8 @@ std::unique_ptr<base::DictionaryValue> dictionary( new base::DictionaryValue()); out_value = dictionary.get(); - extension_prefs->UpdateExtensionPref( - extension_id, - kPrefStringForIdMapping, - dictionary.release()); + extension_prefs->UpdateExtensionPref(extension_id, kPrefStringForIdMapping, + std::move(dictionary)); } return out_value; @@ -73,9 +71,8 @@ CreateMappingForUnmappedDevices(devices, editable_mapping_dictionary.get()); // Write into |ExtensionPrefs| which will get persisted in disk. - extension_prefs->UpdateExtensionPref(extension_id, - kPrefStringForIdMapping, - editable_mapping_dictionary.release()); + extension_prefs->UpdateExtensionPref(extension_id, kPrefStringForIdMapping, + std::move(editable_mapping_dictionary)); return devices; }
diff --git a/chrome/browser/extensions/chrome_app_sorting.cc b/chrome/browser/extensions/chrome_app_sorting.cc index c924513..b25ae4b 100644 --- a/chrome/browser/extensions/chrome_app_sorting.cc +++ b/chrome/browser/extensions/chrome_app_sorting.cc
@@ -5,6 +5,7 @@ #include "chrome/browser/extensions/chrome_app_sorting.h" #include <algorithm> +#include <utility> #include <vector> #include "base/macros.h" @@ -113,7 +114,7 @@ page = PageIntegerAsStringOrdinal(old_page_index); SetPageOrdinal(*ext_id, page); - prefs->UpdateExtensionPref(*ext_id, kPrefPageIndexDeprecated, NULL); + prefs->UpdateExtensionPref(*ext_id, kPrefPageIndexDeprecated, nullptr); } int old_app_launch_index = 0; @@ -128,7 +129,8 @@ if (page.IsValid()) app_launches_to_convert[page][old_app_launch_index] = &*ext_id; - prefs->UpdateExtensionPref(*ext_id, kPrefAppLaunchIndexDeprecated, NULL); + prefs->UpdateExtensionPref(*ext_id, kPrefAppLaunchIndexDeprecated, + nullptr); } } @@ -321,15 +323,15 @@ extension_id, page_ordinal, GetAppLaunchOrdinal(extension_id)); AddOrdinalMapping(extension_id, page_ordinal, new_app_launch_ordinal); - base::Value* new_value = + std::unique_ptr<base::Value> new_value = new_app_launch_ordinal.IsValid() - ? new base::Value(new_app_launch_ordinal.ToInternalValue()) - : NULL; + ? base::MakeUnique<base::Value>( + new_app_launch_ordinal.ToInternalValue()) + : nullptr; - ExtensionPrefs::Get(browser_context_)->UpdateExtensionPref( - extension_id, - kPrefAppLaunchOrdinal, - new_value); + ExtensionPrefs::Get(browser_context_) + ->UpdateExtensionPref(extension_id, kPrefAppLaunchOrdinal, + std::move(new_value)); SyncIfNeeded(extension_id); } @@ -401,15 +403,14 @@ extension_id, GetPageOrdinal(extension_id), app_launch_ordinal); AddOrdinalMapping(extension_id, new_page_ordinal, app_launch_ordinal); - base::Value* new_value = + std::unique_ptr<base::Value> new_value = new_page_ordinal.IsValid() - ? new base::Value(new_page_ordinal.ToInternalValue()) - : NULL; + ? 
base::MakeUnique<base::Value>(new_page_ordinal.ToInternalValue()) + : nullptr; - ExtensionPrefs::Get(browser_context_)->UpdateExtensionPref( - extension_id, - kPrefPageOrdinal, - new_value); + ExtensionPrefs::Get(browser_context_) + ->UpdateExtensionPref(extension_id, kPrefPageOrdinal, + std::move(new_value)); SyncIfNeeded(extension_id); } @@ -419,8 +420,8 @@ GetAppLaunchOrdinal(extension_id)); ExtensionPrefs* prefs = ExtensionPrefs::Get(browser_context_); - prefs->UpdateExtensionPref(extension_id, kPrefPageOrdinal, NULL); - prefs->UpdateExtensionPref(extension_id, kPrefAppLaunchOrdinal, NULL); + prefs->UpdateExtensionPref(extension_id, kPrefPageOrdinal, nullptr); + prefs->UpdateExtensionPref(extension_id, kPrefAppLaunchOrdinal, nullptr); } int ChromeAppSorting::PageStringOrdinalAsInteger(
diff --git a/chrome/browser/extensions/chrome_app_sorting_unittest.cc b/chrome/browser/extensions/chrome_app_sorting_unittest.cc index 7400e85..753642e 100644 --- a/chrome/browser/extensions/chrome_app_sorting_unittest.cc +++ b/chrome/browser/extensions/chrome_app_sorting_unittest.cc
@@ -152,20 +152,26 @@ // Setup the deprecated preferences. ExtensionScopedPrefs* scoped_prefs = static_cast<ExtensionScopedPrefs*>(prefs()); - scoped_prefs->UpdateExtensionPref( - extension1()->id(), kPrefAppLaunchIndexDeprecated, new base::Value(0)); - scoped_prefs->UpdateExtensionPref( - extension1()->id(), kPrefPageIndexDeprecated, new base::Value(0)); + scoped_prefs->UpdateExtensionPref(extension1()->id(), + kPrefAppLaunchIndexDeprecated, + base::MakeUnique<base::Value>(0)); + scoped_prefs->UpdateExtensionPref(extension1()->id(), + kPrefPageIndexDeprecated, + base::MakeUnique<base::Value>(0)); - scoped_prefs->UpdateExtensionPref( - extension2()->id(), kPrefAppLaunchIndexDeprecated, new base::Value(1)); - scoped_prefs->UpdateExtensionPref( - extension2()->id(), kPrefPageIndexDeprecated, new base::Value(0)); + scoped_prefs->UpdateExtensionPref(extension2()->id(), + kPrefAppLaunchIndexDeprecated, + base::MakeUnique<base::Value>(1)); + scoped_prefs->UpdateExtensionPref(extension2()->id(), + kPrefPageIndexDeprecated, + base::MakeUnique<base::Value>(0)); - scoped_prefs->UpdateExtensionPref( - extension3()->id(), kPrefAppLaunchIndexDeprecated, new base::Value(0)); - scoped_prefs->UpdateExtensionPref( - extension3()->id(), kPrefPageIndexDeprecated, new base::Value(1)); + scoped_prefs->UpdateExtensionPref(extension3()->id(), + kPrefAppLaunchIndexDeprecated, + base::MakeUnique<base::Value>(0)); + scoped_prefs->UpdateExtensionPref(extension3()->id(), + kPrefPageIndexDeprecated, + base::MakeUnique<base::Value>(1)); // We insert the ids in reverse order so that we have to deal with the // element on the 2nd page before the 1st page is seen. @@ -253,10 +259,12 @@ // Setup the deprecated preference. 
ExtensionScopedPrefs* scoped_prefs = static_cast<ExtensionScopedPrefs*>(prefs()); - scoped_prefs->UpdateExtensionPref( - extension1()->id(), kPrefAppLaunchIndexDeprecated, new base::Value(0)); - scoped_prefs->UpdateExtensionPref( - extension1()->id(), kPrefPageIndexDeprecated, new base::Value(-1)); + scoped_prefs->UpdateExtensionPref(extension1()->id(), + kPrefAppLaunchIndexDeprecated, + base::MakeUnique<base::Value>(0)); + scoped_prefs->UpdateExtensionPref(extension1()->id(), + kPrefPageIndexDeprecated, + base::MakeUnique<base::Value>(-1)); } void Verify() override { // Make sure that the invalid page_index wasn't converted over.
diff --git a/chrome/browser/extensions/error_console/error_console.cc b/chrome/browser/extensions/error_console/error_console.cc index 12de1608..079171d8 100644 --- a/chrome/browser/extensions/error_console/error_console.cc +++ b/chrome/browser/extensions/error_console/error_console.cc
@@ -102,7 +102,7 @@ mask &= ~(1 << type); prefs_->UpdateExtensionPref(extension_id, kStoreExtensionErrorsPref, - new base::Value(mask)); + base::MakeUnique<base::Value>(mask)); } void ErrorConsole::SetReportingAllForExtension( @@ -114,7 +114,7 @@ int mask = enabled ? (1 << ExtensionError::NUM_ERROR_TYPES) - 1 : 0; prefs_->UpdateExtensionPref(extension_id, kStoreExtensionErrorsPref, - new base::Value(mask)); + base::MakeUnique<base::Value>(mask)); } bool ErrorConsole::IsReportingEnabledForExtension( @@ -132,7 +132,7 @@ if (!enabled_ || !crx_file::id_util::IdIsValid(extension_id)) return; - prefs_->UpdateExtensionPref(extension_id, kStoreExtensionErrorsPref, NULL); + prefs_->UpdateExtensionPref(extension_id, kStoreExtensionErrorsPref, nullptr); } void ErrorConsole::ReportError(std::unique_ptr<ExtensionError> error) {
diff --git a/chrome/browser/extensions/extension_message_bubble_controller.cc b/chrome/browser/extensions/extension_message_bubble_controller.cc index 0d56e48..268b913 100644 --- a/chrome/browser/extensions/extension_message_bubble_controller.cc +++ b/chrome/browser/extensions/extension_message_bubble_controller.cc
@@ -77,8 +77,9 @@ if (pref_name.empty()) return; extensions::ExtensionPrefs* prefs = extensions::ExtensionPrefs::Get(profile_); - prefs->UpdateExtensionPref(extension_id, pref_name, - value ? new base::Value(value) : NULL); + prefs->UpdateExtensionPref( + extension_id, pref_name, + value ? base::MakeUnique<base::Value>(value) : nullptr); } std::string
diff --git a/chrome/browser/extensions/extension_message_bubble_controller_unittest.cc b/chrome/browser/extensions/extension_message_bubble_controller_unittest.cc index 232e913..c93f79c 100644 --- a/chrome/browser/extensions/extension_message_bubble_controller_unittest.cc +++ b/chrome/browser/extensions/extension_message_bubble_controller_unittest.cc
@@ -916,7 +916,7 @@ ExtensionPrefs* prefs) { std::string time_str = base::Int64ToString(time.ToInternalValue()); prefs->UpdateExtensionPref(extension_id, "install_time", - new base::Value(time_str)); + base::MakeUnique<base::Value>(time_str)); } // The feature this is meant to test is only implemented on Windows and Mac.
diff --git a/chrome/browser/extensions/extension_storage_monitor.cc b/chrome/browser/extensions/extension_storage_monitor.cc index 6c88c23..9e9efc21 100644 --- a/chrome/browser/extensions/extension_storage_monitor.cc +++ b/chrome/browser/extensions/extension_storage_monitor.cc
@@ -608,8 +608,9 @@ int64_t next_threshold) { extension_prefs_->UpdateExtensionPref( extension_id, kPrefNextStorageThreshold, - next_threshold > 0 ? new base::Value(base::Int64ToString(next_threshold)) - : NULL); + next_threshold > 0 + ? base::MakeUnique<base::Value>(base::Int64ToString(next_threshold)) + : nullptr); } int64_t ExtensionStorageMonitor::GetNextStorageThresholdFromPrefs( @@ -644,7 +645,7 @@ bool enable_notifications) { extension_prefs_->UpdateExtensionPref( extension_id, kPrefDisableStorageNotifications, - enable_notifications ? NULL : new base::Value(true)); + enable_notifications ? nullptr : base::MakeUnique<base::Value>(true)); } } // namespace extensions
diff --git a/chrome/browser/extensions/extension_util.cc b/chrome/browser/extensions/extension_util.cc index ba4c470..213de8ed 100644 --- a/chrome/browser/extensions/extension_util.cc +++ b/chrome/browser/extensions/extension_util.cc
@@ -199,7 +199,7 @@ ExtensionPrefs::Get(context)->UpdateExtensionPref( extension_id, kWasInstalledByCustodianPrefName, - installed_by_custodian ? new base::Value(true) : nullptr); + installed_by_custodian ? base::MakeUnique<base::Value>(true) : nullptr); ExtensionService* service = ExtensionSystem::Get(context)->extension_service();
diff --git a/chrome/browser/extensions/launch_util.cc b/chrome/browser/extensions/launch_util.cc index 7280711..b911fca 100644 --- a/chrome/browser/extensions/launch_util.cc +++ b/chrome/browser/extensions/launch_util.cc
@@ -83,7 +83,7 @@ ExtensionPrefs::Get(context)->UpdateExtensionPref( extension_id, kPrefLaunchType, - new base::Value(static_cast<int>(launch_type))); + base::MakeUnique<base::Value>(static_cast<int>(launch_type))); // Sync the launch type. const Extension* extension =
diff --git a/chrome/browser/extensions/scripting_permissions_modifier.cc b/chrome/browser/extensions/scripting_permissions_modifier.cc index e78cae6b..288dd2f 100644 --- a/chrome/browser/extensions/scripting_permissions_modifier.cc +++ b/chrome/browser/extensions/scripting_permissions_modifier.cc
@@ -4,6 +4,7 @@ #include "chrome/browser/extensions/scripting_permissions_modifier.h" +#include "base/memory/ptr_util.h" #include "chrome/browser/extensions/extension_sync_service.h" #include "chrome/browser/extensions/permissions_updater.h" #include "extensions/browser/extension_prefs.h" @@ -58,10 +59,10 @@ const std::string& id, ExtensionPrefs* prefs) { prefs->UpdateExtensionPref(id, kExtensionAllowedOnAllUrlsPrefName, - new base::Value(allowed)); + base::MakeUnique<base::Value>(allowed)); if (by_user) { prefs->UpdateExtensionPref(id, kHasSetScriptOnAllUrlsPrefName, - new base::Value(true)); + base::MakeUnique<base::Value>(true)); } } @@ -309,7 +310,7 @@ DCHECK(ExtensionMustBeAllowedOnAllUrls(*extension_)); extension_prefs_->UpdateExtensionPref(extension_->id(), kExtensionAllowedOnAllUrlsPrefName, - new base::Value(true)); + base::MakeUnique<base::Value>(true)); extension_prefs_->UpdateExtensionPref( extension_->id(), kHasSetScriptOnAllUrlsPrefName, nullptr); }
diff --git a/chrome/browser/extensions/scripting_permissions_modifier_unittest.cc b/chrome/browser/extensions/scripting_permissions_modifier_unittest.cc index d369633..18d5db7 100644 --- a/chrome/browser/extensions/scripting_permissions_modifier_unittest.cc +++ b/chrome/browser/extensions/scripting_permissions_modifier_unittest.cc
@@ -357,9 +357,9 @@ const char* kHasSetPref = "has_set_script_all_urls"; ExtensionPrefs* prefs = ExtensionPrefs::Get(profile()); prefs->UpdateExtensionPref(extension->id(), kAllowedPref, - new base::Value(false)); + base::MakeUnique<base::Value>(false)); prefs->UpdateExtensionPref(extension->id(), kHasSetPref, - new base::Value(true)); + base::MakeUnique<base::Value>(true)); // The modifier should still return the correct value and should fix the // preferences.
diff --git a/chrome/browser/notifications/notification_platform_bridge_mac.mm b/chrome/browser/notifications/notification_platform_bridge_mac.mm index 9017890..c12ba14c 100644 --- a/chrome/browser/notifications/notification_platform_bridge_mac.mm +++ b/chrome/browser/notifications/notification_platform_bridge_mac.mm
@@ -474,12 +474,11 @@ }; xpcConnection_.get().invalidationHandler = ^{ + // This means that the connection should be recreated if it needs + // to be used again. LOG(WARNING) << "AlertNotificationService: XPC connection invalidated."; RecordXPCEvent(INVALIDATED); setExceptionPort_ = NO; - // This means that the connection should be recreated if it needs - // to be used again. It should not really happen. - DCHECK(false) << "XPC Connection invalidated"; }; xpcConnection_.get().exportedInterface =
diff --git a/chrome/browser/password_manager/credential_manager_browsertest.cc b/chrome/browser/password_manager/credential_manager_browsertest.cc index 95fad6ec..f88f0284 100644 --- a/chrome/browser/password_manager/credential_manager_browsertest.cc +++ b/chrome/browser/password_manager/credential_manager_browsertest.cc
@@ -162,9 +162,8 @@ EXPECT_TRUE(base::ContainsKey(passwords, www_url.spec())); } -// Flaky. See crbug.com/703305 IN_PROC_BROWSER_TEST_F(CredentialManagerBrowserTest, - DISABLED_ObsoleteHttpCredentialMovedOnMigrationToHstsSite) { + ObsoleteHttpCredentialMovedOnMigrationToHstsSite) { // Add an http credential to the password store. GURL https_origin = https_test_server().base_url(); ASSERT_TRUE(https_origin.SchemeIs(url::kHttpsScheme)); @@ -191,29 +190,13 @@ ui_test_utils::NavigateToURL( browser(), https_test_server().GetURL("/password/done.html")); - // Call the API to trigger |get| and |store| and redirect. + // Call the API to trigger the account chooser. ASSERT_TRUE(content::ExecuteScript( RenderViewHost(), "navigator.credentials.get({password: true})")); + BubbleObserver(WebContents()).WaitForAccountChooser(); - // Issue the query for HTTPS credentials. + // Wait for the migration logic to actually touch the password store. WaitForPasswordStore(); - - // Realize there are no HTTPS credentials and issue the query for HTTP - // credentials instead. - WaitForPasswordStore(); - - // Sync with IO thread before continuing. This is necessary, because the - // credential migration triggers a query for the HSTS state which gets - // executed on the IO thread. The actual task is empty, because only the reply - // is relevant. By the time the reply is executed it is guaranteed that the - // migration is completed. - const auto empty_lambda = []() {}; - base::RunLoop run_loop; - content::BrowserThread::PostTaskAndReply(content::BrowserThread::IO, - FROM_HERE, base::Bind(empty_lambda), - run_loop.QuitClosure()); - run_loop.Run(); - // Only HTTPS passwords should be present. EXPECT_TRUE( password_store->stored_passwords().at(http_origin.spec()).empty());
diff --git a/chrome/browser/password_manager/password_manager_browsertest.cc b/chrome/browser/password_manager/password_manager_browsertest.cc index cb0e771..5719a73 100644 --- a/chrome/browser/password_manager/password_manager_browsertest.cc +++ b/chrome/browser/password_manager/password_manager_browsertest.cc
@@ -322,23 +322,23 @@ // Fill a form and submit through a <input type="submit"> button. The form // points to a redirection page. - NavigationObserver observer(WebContents()); - std::unique_ptr<BubbleObserver> prompt_observer( - new BubbleObserver(WebContents())); + NavigationObserver observer1(WebContents()); std::string fill_and_submit = "document.getElementById('username_redirect').value = 'temp';" "document.getElementById('password_redirect').value = 'random';" "document.getElementById('submit_redirect').click()"; ASSERT_TRUE(content::ExecuteScript(RenderViewHost(), fill_and_submit)); - observer.Wait(); - EXPECT_TRUE(prompt_observer->IsShowingSavePrompt()); + observer1.Wait(); + BubbleObserver bubble_observer(WebContents()); + EXPECT_TRUE(bubble_observer.IsShowingSavePrompt()); // The redirection page now redirects via Javascript. We check that the - // infobar stays. + // bubble stays. + NavigationObserver observer2(WebContents()); ASSERT_TRUE(content::ExecuteScript(RenderViewHost(), "window.location.href = 'done.html';")); - observer.Wait(); - EXPECT_TRUE(prompt_observer->IsShowingSavePrompt()); + observer2.Wait(); + EXPECT_TRUE(bubble_observer.IsShowingSavePrompt()); } IN_PROC_BROWSER_TEST_F(PasswordManagerBrowserTestBase, @@ -1285,7 +1285,7 @@ static_cast<password_manager::TestPasswordStore*>( PasswordStoreFactory::GetForProfile( browser()->profile(), ServiceAccessType::IMPLICIT_ACCESS).get()); - EXPECT_TRUE(password_store->IsEmpty()); + ASSERT_TRUE(password_store->IsEmpty()); // Navigate to a page requiring HTTP auth. 
Wait for the tab to get the correct // WebContents, but don't wait for navigation, which only finishes after @@ -1295,11 +1295,10 @@ WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_TAB); - content::NavigationController* nav_controller = - &WebContents()->GetController(); - NavigationObserver nav_observer(WebContents()); - std::unique_ptr<BubbleObserver> prompt_observer( - new BubbleObserver(WebContents())); + content::WebContents* tab = + browser()->tab_strip_model()->GetActiveWebContents(); + content::NavigationController* nav_controller = &tab->GetController(); + NavigationObserver nav_observer(tab); WindowedAuthNeededObserver auth_needed_observer(nav_controller); auth_needed_observer.Wait(); @@ -1314,13 +1313,14 @@ // The password manager should be working correctly. nav_observer.Wait(); - EXPECT_TRUE(prompt_observer->IsShowingSavePrompt()); - prompt_observer->AcceptSavePrompt(); + WaitForPasswordStore(); + BubbleObserver bubble_observer(tab); + EXPECT_TRUE(bubble_observer.IsShowingSavePrompt()); + bubble_observer.AcceptSavePrompt(); // Spin the message loop to make sure the password store had a chance to save // the password. 
- base::RunLoop run_loop; - run_loop.RunUntilIdle(); + WaitForPasswordStore(); EXPECT_FALSE(password_store->IsEmpty()); } @@ -1777,6 +1777,19 @@ IN_PROC_BROWSER_TEST_F(PasswordManagerBrowserTestBase, InFrameNavigationDoesNotClearPopupState) { + scoped_refptr<password_manager::TestPasswordStore> password_store = + static_cast<password_manager::TestPasswordStore*>( + PasswordStoreFactory::GetForProfile( + browser()->profile(), ServiceAccessType::IMPLICIT_ACCESS) + .get()); + autofill::PasswordForm signin_form; + signin_form.signon_realm = embedded_test_server()->base_url().spec(); + signin_form.username_value = base::ASCIIToUTF16("temp"); + signin_form.password_value = base::ASCIIToUTF16("random123"); + password_store->AddLogin(signin_form); + + NavigateToFile("/password/password_form.html"); + // Mock out the AutofillClient so we know how long to wait. Unfortunately // there isn't otherwise a good event to wait on to verify that the popup // would have been shown. @@ -1792,23 +1805,6 @@ driver->GetPasswordAutofillManager()->set_autofill_client( observing_autofill_client); - NavigateToFile("/password/password_form.html"); - - NavigationObserver form_submit_observer(WebContents()); - std::unique_ptr<BubbleObserver> prompt_observer( - new BubbleObserver(WebContents())); - std::string fill = - "document.getElementById('username_field').value = 'temp';" - "document.getElementById('password_field').value = 'random123';" - "document.getElementById('input_submit_button').click();"; - - // Save credentials for the site. 
- ASSERT_TRUE(content::ExecuteScript(RenderViewHost(), fill)); - form_submit_observer.Wait(); - EXPECT_TRUE(prompt_observer->IsShowingSavePrompt()); - prompt_observer->AcceptSavePrompt(); - - NavigateToFile("/password/password_form.html"); ASSERT_TRUE(content::ExecuteScript( RenderViewHost(), "var usernameRect = document.getElementById('username_field')" @@ -1830,9 +1826,9 @@ "window.domAutomationController.send(usernameRect.left);", &left)); - content::SimulateMouseClickAt( - WebContents(), 0, blink::WebMouseEvent::Button::Left, gfx::Point(left + 1, - top + 1)); + content::SimulateMouseClickAt(WebContents(), 0, + blink::WebMouseEvent::Button::Left, + gfx::Point(left + 1, top + 1)); // Make sure the popup would be shown. observing_autofill_client->Wait(); } @@ -2915,7 +2911,8 @@ browser(), embedded_test_server()->GetURL("/password/password_form.html"), WindowOpenDisposition::NEW_FOREGROUND_TAB, ui_test_utils::BROWSER_TEST_WAIT_FOR_NAVIGATION); - content::WebContents* forms_web_contents = WebContents(); + content::WebContents* forms_web_contents = + browser()->tab_strip_model()->GetActiveWebContents(); // The renderer queries the availability of logging on start-up. However, it // can take too long to propagate that message from the browser back to the @@ -3243,6 +3240,16 @@ ASSERT_TRUE( base::FeatureList::IsEnabled(security_state::kHttpFormWarningFeature)); + // We need to serve from a non-localhost context for the form to be treated as + // Not Secure. + host_resolver()->AddRule("example.com", "127.0.0.1"); + NavigationObserver observer(WebContents()); + ui_test_utils::NavigateToURL( + browser(), embedded_test_server()->GetURL( + "example.com", "/password/password_form.html")); + observer.Wait(); + + // Mock the autofill client. 
password_manager::ContentPasswordManagerDriverFactory* driver_factory = password_manager::ContentPasswordManagerDriverFactory::FromWebContents( WebContents()); @@ -3255,15 +3262,6 @@ driver->GetPasswordAutofillManager()->set_autofill_client( observing_autofill_client); - // We need to serve from a non-localhost context for the form to be treated as - // Not Secure. - host_resolver()->AddRule("example.com", "127.0.0.1"); - NavigationObserver observer(WebContents()); - ui_test_utils::NavigateToURL( - browser(), embedded_test_server()->GetURL( - "example.com", "/password/password_form.html")); - observer.Wait(); - ASSERT_TRUE(content::ExecuteScript( RenderViewHost(), "var inputRect = document.getElementById('username_field_no_name')" @@ -3298,6 +3296,16 @@ ASSERT_TRUE( base::FeatureList::IsEnabled(security_state::kHttpFormWarningFeature)); + // We need to serve from a non-localhost context for the form to be treated as + // Not Secure. + host_resolver()->AddRule("example.com", "127.0.0.1"); + NavigationObserver observer(WebContents()); + ui_test_utils::NavigateToURL( + browser(), embedded_test_server()->GetURL( + "example.com", "/password/password_form.html")); + observer.Wait(); + + // Mock the autofill client. password_manager::ContentPasswordManagerDriverFactory* driver_factory = password_manager::ContentPasswordManagerDriverFactory::FromWebContents( WebContents()); @@ -3310,15 +3318,6 @@ driver->GetPasswordAutofillManager()->set_autofill_client( observing_autofill_client); - // We need to serve from a non-localhost context for the form to be treated as - // Not Secure. - host_resolver()->AddRule("example.com", "127.0.0.1"); - NavigationObserver observer(WebContents()); - ui_test_utils::NavigateToURL( - browser(), embedded_test_server()->GetURL( - "example.com", "/password/password_form.html")); - observer.Wait(); - ASSERT_TRUE(content::ExecuteScript( RenderViewHost(), "var inputRect = document.getElementById('ef_extra')"
diff --git a/chrome/browser/password_manager/password_manager_test_base.cc b/chrome/browser/password_manager/password_manager_test_base.cc index 9ac7ec53..e51a9926 100644 --- a/chrome/browser/password_manager/password_manager_test_base.cc +++ b/chrome/browser/password_manager/password_manager_test_base.cc
@@ -8,11 +8,12 @@ #include "base/macros.h" #include "base/memory/ref_counted.h" -#include "base/run_loop.h" #include "base/strings/stringprintf.h" +#include "chrome/browser/password_manager/chrome_password_manager_client.h" #include "chrome/browser/password_manager/password_store_factory.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/profiles/profile_io_data.h" +#include "chrome/browser/ui/autofill/chrome_autofill_client.h" #include "chrome/browser/ui/browser.h" #include "chrome/browser/ui/passwords/manage_passwords_ui_controller.h" #include "chrome/browser/ui/tabs/tab_strip_model.h" @@ -53,6 +54,56 @@ DISALLOW_COPY_AND_ASSIGN(PasswordStoreResultsObserver); }; +// ManagePasswordsUIController subclass to capture the UI events. +class CustomManagePasswordsUIController : public ManagePasswordsUIController { + public: + explicit CustomManagePasswordsUIController( + content::WebContents* web_contents); + + void WaitForAccountChooser(); + + private: + // PasswordsClientUIDelegate: + bool OnChooseCredentials( + std::vector<std::unique_ptr<autofill::PasswordForm>> local_credentials, + const GURL& origin, + const ManagePasswordsState::CredentialsCallback& callback) override; + + // The loop to be stopped when the account chooser pops up. + base::RunLoop* account_chooser_loop_ = nullptr; + + DISALLOW_COPY_AND_ASSIGN(CustomManagePasswordsUIController); +}; + +CustomManagePasswordsUIController::CustomManagePasswordsUIController( + content::WebContents* web_contents) + : ManagePasswordsUIController(web_contents) { + // Attach CustomManagePasswordsUIController to |web_contents| so the default + // ManagePasswordsUIController isn't created. + // Do not silently replace an existing ManagePasswordsUIController because it + // unregisters itself in WebContentsDestroyed(). 
+ EXPECT_FALSE(web_contents->GetUserData(UserDataKey())); + web_contents->SetUserData(UserDataKey(), this); +} + +void CustomManagePasswordsUIController::WaitForAccountChooser() { + base::RunLoop account_chooser_loop; + account_chooser_loop_ = &account_chooser_loop; + account_chooser_loop_->Run(); +} + +bool CustomManagePasswordsUIController::OnChooseCredentials( + std::vector<std::unique_ptr<autofill::PasswordForm>> local_credentials, + const GURL& origin, + const ManagePasswordsState::CredentialsCallback& callback) { + if (account_chooser_loop_) { + account_chooser_loop_->Quit(); + account_chooser_loop_ = nullptr; + } + return ManagePasswordsUIController::OnChooseCredentials( + std::move(local_credentials), origin, callback); +} + void AddHSTSHostImpl( const scoped_refptr<net::URLRequestContextGetter>& request_context, const std::string& host) { @@ -74,9 +125,7 @@ NavigationObserver::NavigationObserver(content::WebContents* web_contents) : content::WebContentsObserver(web_contents), - quit_on_entry_committed_(false), - message_loop_runner_(new content::MessageLoopRunner) { -} + quit_on_entry_committed_(false) {} NavigationObserver::~NavigationObserver() { } @@ -86,7 +135,7 @@ return; if (quit_on_entry_committed_) - message_loop_runner_->Quit(); + run_loop_.Quit(); } void NavigationObserver::DidFinishLoad( @@ -95,14 +144,14 @@ render_frame_host_ = render_frame_host; if (!wait_for_path_.empty()) { if (validated_url.path() == wait_for_path_) - message_loop_runner_->Quit(); + run_loop_.Quit(); } else if (!render_frame_host->GetParent()) { - message_loop_runner_->Quit(); + run_loop_.Quit(); } } void NavigationObserver::Wait() { - message_loop_runner_->Run(); + run_loop_.Run(); } BubbleObserver::BubbleObserver(content::WebContents* web_contents) @@ -138,11 +187,21 @@ EXPECT_FALSE(IsShowingUpdatePrompt()); } -PasswordManagerBrowserTestBase::PasswordManagerBrowserTestBase() - : https_test_server_(net::EmbeddedTestServer::TYPE_HTTPS) {} 
-PasswordManagerBrowserTestBase::~PasswordManagerBrowserTestBase() { +void BubbleObserver::WaitForAccountChooser() const { + if (passwords_ui_controller_->GetState() == + password_manager::ui::CREDENTIAL_REQUEST_STATE) + return; + CustomManagePasswordsUIController* controller = + static_cast<CustomManagePasswordsUIController*>(passwords_ui_controller_); + controller->WaitForAccountChooser(); } +PasswordManagerBrowserTestBase::PasswordManagerBrowserTestBase() + : https_test_server_(net::EmbeddedTestServer::TYPE_HTTPS), + web_contents_(nullptr) {} + +PasswordManagerBrowserTestBase::~PasswordManagerBrowserTestBase() = default; + void PasswordManagerBrowserTestBase::SetUpOnMainThread() { // Use TestPasswordStore to remove a possible race. Normally the // PasswordStore does its database manipulation on the DB thread, which @@ -168,6 +227,31 @@ verify_result.is_issued_by_known_root = true; verify_result.verified_cert = cert; mock_cert_verifier().AddResultForCert(cert.get(), verify_result, net::OK); + + // Add a tab with a customized ManagePasswordsUIController. Thus, we can + // intercept useful UI events. + content::WebContents* tab = + browser()->tab_strip_model()->GetActiveWebContents(); + web_contents_ = content::WebContents::Create( + content::WebContents::CreateParams(tab->GetBrowserContext())); + ASSERT_TRUE(web_contents_); + + // ManagePasswordsUIController needs ChromePasswordManagerClient for logging. 
+ autofill::ChromeAutofillClient::CreateForWebContents(web_contents_); + ChromePasswordManagerClient::CreateForWebContentsWithAutofillClient( + web_contents_, + autofill::ChromeAutofillClient::FromWebContents(web_contents_)); + ASSERT_TRUE(ChromePasswordManagerClient::FromWebContents(web_contents_)); + CustomManagePasswordsUIController* controller = + new CustomManagePasswordsUIController(web_contents_); + browser()->tab_strip_model()->AppendWebContents(web_contents_, true); + browser()->tab_strip_model()->CloseWebContentsAt(0, + TabStripModel::CLOSE_NONE); + ASSERT_EQ(controller, + ManagePasswordsUIController::FromWebContents(web_contents_)); + ASSERT_EQ(web_contents_, + browser()->tab_strip_model()->GetActiveWebContents()); + ASSERT_FALSE(web_contents_->IsLoading()); } void PasswordManagerBrowserTestBase::TearDownOnMainThread() { @@ -183,7 +267,7 @@ } content::WebContents* PasswordManagerBrowserTestBase::WebContents() { - return browser()->tab_strip_model()->GetActiveWebContents(); + return web_contents_; } content::RenderViewHost* PasswordManagerBrowserTestBase::RenderViewHost() { @@ -191,6 +275,8 @@ } void PasswordManagerBrowserTestBase::NavigateToFile(const std::string& path) { + ASSERT_EQ(web_contents_, + browser()->tab_strip_model()->GetActiveWebContents()); NavigationObserver observer(WebContents()); GURL url = embedded_test_server()->GetURL(path); ui_test_utils::NavigateToURL(browser(), url); @@ -220,8 +306,7 @@ // Spin the message loop to make sure the password store had a chance to save // the password. - base::RunLoop run_loop; - run_loop.RunUntilIdle(); + WaitForPasswordStore(); ASSERT_FALSE(password_store->IsEmpty()); NavigateToFile(filename);
diff --git a/chrome/browser/password_manager/password_manager_test_base.h b/chrome/browser/password_manager/password_manager_test_base.h index 61e773b..af4f3ff 100644 --- a/chrome/browser/password_manager/password_manager_test_base.h +++ b/chrome/browser/password_manager/password_manager_test_base.h
@@ -8,10 +8,10 @@ #include <memory> #include "base/macros.h" +#include "base/run_loop.h" #include "chrome/test/base/in_process_browser_test.h" #include "components/password_manager/core/browser/password_store_consumer.h" #include "content/public/browser/web_contents_observer.h" -#include "content/public/test/test_utils.h" #include "net/cert/mock_cert_verifier.h" #include "net/test/embedded_test_server/embedded_test_server.h" @@ -53,7 +53,7 @@ std::string wait_for_path_; content::RenderFrameHost* render_frame_host_; bool quit_on_entry_committed_; - scoped_refptr<content::MessageLoopRunner> message_loop_runner_; + base::RunLoop run_loop_; DISALLOW_COPY_AND_ASSIGN(NavigationObserver); }; @@ -83,6 +83,11 @@ // observed form. Checks that the prompt is no longer visible afterwards. void AcceptUpdatePrompt(const autofill::PasswordForm& form) const; + // Returns once the account chooser pops up or it's already shown. + // |web_contents| must be the custom one returned by + // PasswordManagerBrowserTestBase. + void WaitForAccountChooser() const; + private: ManagePasswordsUIController* const passwords_ui_controller_; @@ -143,6 +148,7 @@ void AddHSTSHost(const std::string& host); // Accessors + // Return the first created tab with a custom ManagePasswordsUIController. content::WebContents* WebContents(); content::RenderViewHost* RenderViewHost(); net::EmbeddedTestServer& https_test_server() { return https_test_server_; } @@ -151,6 +157,8 @@ private: net::EmbeddedTestServer https_test_server_; net::MockCertVerifier mock_cert_verifier_; + // A tab with some hooks injected. + content::WebContents* web_contents_; DISALLOW_COPY_AND_ASSIGN(PasswordManagerBrowserTestBase); };
diff --git a/chrome/browser/profiles/profile_manager_browsertest.cc b/chrome/browser/profiles/profile_manager_browsertest.cc index f0287d7..1c5fec1 100644 --- a/chrome/browser/profiles/profile_manager_browsertest.cc +++ b/chrome/browser/profiles/profile_manager_browsertest.cc
@@ -7,8 +7,12 @@ #include "base/bind.h" #include "base/command_line.h" #include "base/macros.h" +#include "base/memory/ptr_util.h" +#include "base/stl_util.h" #include "base/strings/utf_string_conversions.h" #include "build/build_config.h" +#include "chrome/browser/lifetime/keep_alive_types.h" +#include "chrome/browser/lifetime/scoped_keep_alive.h" #include "chrome/browser/password_manager/password_store_factory.h" #include "chrome/browser/profiles/profile_attributes_entry.h" #include "chrome/browser/profiles/profile_attributes_storage.h" @@ -59,6 +63,39 @@ base::MessageLoop::current()->QuitWhenIdle(); } +// An observer that returns control to the test code after one or more profiles +// have been deleted. It owns a ScopedKeepAlive object to prevent browser +// shutdown from starting if the browser becomes windowless. +class MultipleProfileDeletionObserver { + public: + explicit MultipleProfileDeletionObserver(size_t callbacks_calls_expected) + : callback_calls_left_(callbacks_calls_expected) { + EXPECT_LT(0u, callback_calls_left_); + } + ProfileManager::CreateCallback QuitAttemptClosure() { + return base::Bind(&MultipleProfileDeletionObserver::QuitAttempt, + base::Unretained(this)); + } + void Wait() { + keep_alive_ = base::MakeUnique<ScopedKeepAlive>( + KeepAliveOrigin::PROFILE_HELPER, KeepAliveRestartOption::DISABLED); + loop_.Run(); + } + + private: + void QuitAttempt(Profile* profile, Profile::CreateStatus status) { + EXPECT_EQ(Profile::CREATE_STATUS_INITIALIZED, status); + if (--callback_calls_left_) + return; + keep_alive_.reset(nullptr); + loop_.Quit(); + } + + base::RunLoop loop_; + size_t callback_calls_left_; + std::unique_ptr<ScopedKeepAlive> keep_alive_; +}; + void EphemeralProfileCreationComplete(Profile* profile, Profile::CreateStatus status) { if (status == Profile::CREATE_STATUS_INITIALIZED) @@ -80,14 +117,21 @@ std::string last_used_profile_name() { return last_used_profile_name_; } + void set_on_profile_removal_callback(const base::Closure& callback) { + 
on_profile_removal_callback_ = callback; + } + // ProfileAttributesStorage::Observer overrides: void OnProfileWillBeRemoved(const base::FilePath& profile_path) override { last_used_profile_name_ = g_browser_process->local_state()->GetString( prefs::kProfileLastUsed); + if (!on_profile_removal_callback_.is_null()) + on_profile_removal_callback_.Run(); } private: std::string last_used_profile_name_; + base::Closure on_profile_removal_callback_; DISALLOW_COPY_AND_ASSIGN(ProfileRemovalObserver); }; @@ -152,7 +196,9 @@ } }; -#if defined(OS_MACOSX) +// Android does not support multi-profiles, and CrOS multi-profiles +// implementation is too different for these tests. +#if !defined(OS_ANDROID) && !defined(OS_CHROMEOS) // Delete single profile and make sure a new one is created. IN_PROC_BROWSER_TEST_F(ProfileManagerBrowserTest, DeleteSingletonProfile) { @@ -168,14 +214,13 @@ base::FilePath singleton_profile_path = storage.GetAllProfilesAttributes().front()->GetPath(); EXPECT_FALSE(singleton_profile_path.empty()); - base::RunLoop run_loop; + MultipleProfileDeletionObserver profile_deletion_observer(1u); profile_manager->ScheduleProfileForDeletion( - singleton_profile_path, - base::Bind(&OnUnblockOnProfileCreation, &run_loop)); + singleton_profile_path, profile_deletion_observer.QuitAttemptClosure()); // Run the message loop until the profile is actually deleted (as indicated // by the callback above being called). - run_loop.Run(); + profile_deletion_observer.Wait(); // Make sure a new profile was created automatically. EXPECT_EQ(1u, storage.GetNumberOfProfiles()); @@ -194,10 +239,78 @@ EXPECT_EQ(last_used_profile_name, observer.last_used_profile_name()); } +// Delete inactive profile in a multi profile setup and make sure current +// browser is not affected. 
+IN_PROC_BROWSER_TEST_F(ProfileManagerBrowserTest, DeleteInactiveProfile) { + ProfileManager* profile_manager = g_browser_process->profile_manager(); + ProfileAttributesStorage& storage = + profile_manager->GetProfileAttributesStorage(); + base::FilePath current_profile_path = browser()->profile()->GetPath(); + + // Create an additional profile. + base::FilePath new_path = profile_manager->GenerateNextProfileDirectoryPath(); + base::RunLoop run_loop; + profile_manager->CreateProfileAsync( + new_path, base::Bind(&OnUnblockOnProfileCreation, &run_loop), + base::string16(), std::string(), std::string()); + run_loop.Run(); + + ASSERT_EQ(2u, storage.GetNumberOfProfiles()); + + // Delete inactive profile. + base::RunLoop loop; + ProfileRemovalObserver observer; + observer.set_on_profile_removal_callback(loop.QuitClosure()); + profile_manager->ScheduleProfileForDeletion(new_path, + ProfileManager::CreateCallback()); + loop.Run(); + + // Make sure only the preexisting profile is left. + EXPECT_EQ(1u, storage.GetNumberOfProfiles()); + EXPECT_EQ(current_profile_path, + storage.GetAllProfilesAttributes().front()->GetPath()); + + // Make sure that last used profile preference is set correctly. + Profile* last_used = ProfileManager::GetLastUsedProfile(); + EXPECT_EQ(current_profile_path, last_used->GetPath()); +} + +// Delete current profile in a multi profile setup and make sure an existing one +// is loaded. +IN_PROC_BROWSER_TEST_F(ProfileManagerBrowserTest, DeleteCurrentProfile) { + ProfileManager* profile_manager = g_browser_process->profile_manager(); + ProfileAttributesStorage& storage = + profile_manager->GetProfileAttributesStorage(); + + // Create an additional profile. 
+ base::FilePath new_path = profile_manager->GenerateNextProfileDirectoryPath(); + base::RunLoop run_loop; + profile_manager->CreateProfileAsync( + new_path, base::Bind(&OnUnblockOnProfileCreation, &run_loop), + base::string16(), std::string(), std::string()); + run_loop.Run(); + + ASSERT_EQ(2u, storage.GetNumberOfProfiles()); + + // Delete current profile. + MultipleProfileDeletionObserver profile_deletion_observer(1u); + profile_manager->ScheduleProfileForDeletion( + browser()->profile()->GetPath(), + profile_deletion_observer.QuitAttemptClosure()); + profile_deletion_observer.Wait(); + + // Make sure the profile created earlier becomes the only profile. + EXPECT_EQ(1u, storage.GetNumberOfProfiles()); + EXPECT_EQ(new_path, storage.GetAllProfilesAttributes().front()->GetPath()); + + // Make sure that last used profile preference is set correctly. + Profile* last_used = ProfileManager::GetLastUsedProfile(); + EXPECT_EQ(new_path, last_used->GetPath()); +} + // Delete all profiles in a multi profile setup and make sure a new one is // created. -// Crashes/CHECKs. See crbug.com/104851 -IN_PROC_BROWSER_TEST_F(ProfileManagerBrowserTest, DISABLED_DeleteAllProfiles) { +IN_PROC_BROWSER_TEST_F(ProfileManagerBrowserTest, DeleteAllProfiles) { ProfileManager* profile_manager = g_browser_process->profile_manager(); ProfileAttributesStorage& storage = profile_manager->GetProfileAttributesStorage(); @@ -216,6 +329,7 @@ ASSERT_EQ(2u, storage.GetNumberOfProfiles()); // Delete all profiles. 
+ MultipleProfileDeletionObserver profile_deletion_observer(2u); std::vector<ProfileAttributesEntry*> entries = storage.GetAllProfilesAttributes(); std::vector<base::FilePath> old_profile_paths; @@ -223,12 +337,10 @@ base::FilePath profile_path = entry->GetPath(); EXPECT_FALSE(profile_path.empty()); profile_manager->ScheduleProfileForDeletion( - profile_path, ProfileManager::CreateCallback()); + profile_path, profile_deletion_observer.QuitAttemptClosure()); old_profile_paths.push_back(profile_path); } - - // Spin things so deletion can take place. - content::RunAllPendingInMessageLoop(); + profile_deletion_observer.Wait(); // Make sure a new profile was created automatically. EXPECT_EQ(1u, storage.GetNumberOfProfiles()); @@ -241,7 +353,7 @@ Profile* last_used = ProfileManager::GetLastUsedProfile(); EXPECT_EQ(new_profile_path, last_used->GetPath()); } -#endif // OS_MACOSX +#endif // !defined(OS_ANDROID) && !defined(OS_CHROMEOS) #if defined(OS_CHROMEOS)
diff --git a/chrome/browser/resources/md_bookmarks/bookmarks_store.js b/chrome/browser/resources/md_bookmarks/bookmarks_store.js deleted file mode 100644 index 077c6db5..0000000 --- a/chrome/browser/resources/md_bookmarks/bookmarks_store.js +++ /dev/null
@@ -1,100 +0,0 @@ -// Copyright 2017 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -/** - * @fileoverview A singleton datastore for the Bookmarks page. Page state is - * publicly readable, but can only be modified by dispatching an Action to - * the store. - */ - -cr.define('bookmarks', function() { - /** @constructor */ - function Store() { - /** @type {!BookmarksPageState} */ - this.data_ = bookmarks.util.createEmptyState(); - /** @type {boolean} */ - this.initialized_ = false; - /** @type {!Array<!Action>} */ - this.queuedActions_ = []; - /** @type {!Array<!StoreObserver>} */ - this.observers_ = []; - } - - Store.prototype = { - /** - * @param {!BookmarksPageState} initialState - */ - init: function(initialState) { - this.data_ = initialState; - - this.queuedActions_.forEach(function(action) { - this.reduce_(action); - }.bind(this)); - - this.initialized_ = true; - this.notifyObservers_(this.data_); - }, - - /** @type {!BookmarksPageState} */ - get data() { - return this.data_; - }, - - /** @return {boolean} */ - isInitialized: function() { - return this.initialized_; - }, - - /** @param {!StoreObserver} observer */ - addObserver: function(observer) { - this.observers_.push(observer); - }, - - /** @param {!StoreObserver} observer */ - removeObserver: function(observer) { - var index = this.observers_.indexOf(observer); - this.observers_.splice(index, 1); - }, - - /** - * Transition to a new UI state based on the supplied |action|, and notify - * observers of the change. If the Store has not yet been initialized, the - * action will be queued and performed upon initialization. 
- * @param {Action} action - */ - handleAction: function(action) { - if (!this.initialized_) { - this.queuedActions_.push(action); - return; - } - - this.reduce_(action); - this.notifyObservers_(this.data_); - }, - - /** - * @param {Action} action - * @private - */ - reduce_: function(action) { - this.data_ = bookmarks.reduceAction(this.data_, action); - }, - - /** - * @param {!BookmarksPageState} state - * @private - */ - notifyObservers_: function(state) { - this.observers_.forEach(function(o) { - o.onStateChanged(state); - }); - }, - }; - - cr.addSingletonGetter(Store); - - return { - Store: Store, - }; -});
diff --git a/chrome/browser/resources/md_bookmarks/compiled_resources2.gyp b/chrome/browser/resources/md_bookmarks/compiled_resources2.gyp index b2e1a70..5afa9d4e9 100644 --- a/chrome/browser/resources/md_bookmarks/compiled_resources2.gyp +++ b/chrome/browser/resources/md_bookmarks/compiled_resources2.gyp
@@ -19,7 +19,7 @@ '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr', '<(EXTERNS_GYP):chrome_extensions', 'actions', - 'bookmarks_store', + 'store', 'util', ], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'], @@ -31,21 +31,12 @@ '<(EXTERNS_GYP):chrome_extensions', 'api_listener', 'router', + 'store', 'store_client', ], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'], }, { - 'target_name': 'bookmarks_store', - 'dependencies': [ - '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr', - '<(EXTERNS_GYP):chrome_extensions', - 'reducers', - 'types', - ], - 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'] - }, - { 'target_name': 'edit_dialog', 'dependencies': [ '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:assert', @@ -83,7 +74,6 @@ '<(EXTERNS_GYP):chrome_extensions', 'actions', 'edit_dialog', - 'item', 'store_client', ], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'], @@ -115,8 +105,10 @@ { 'target_name': 'store', 'dependencies': [ + '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr', '<(EXTERNS_GYP):chrome_extensions', - 'router', + 'reducers', + 'types', ], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi'] }, @@ -124,7 +116,7 @@ 'target_name': 'store_client', 'dependencies': [ '<(DEPTH)/ui/webui/resources/js/compiled_resources2.gyp:cr', - 'bookmarks_store', + 'store', 'types', ], 'includes': ['../../../../third_party/closure_compiler/compile_js2.gypi']
diff --git a/chrome/browser/resources/md_bookmarks/store.html b/chrome/browser/resources/md_bookmarks/store.html index 42fcd559d..a5f7057d 100644 --- a/chrome/browser/resources/md_bookmarks/store.html +++ b/chrome/browser/resources/md_bookmarks/store.html
@@ -1,17 +1,4 @@ -<!-- New-style store: --> <link rel="import" href="chrome://resources/html/cr.html"> <link rel="import" href="chrome://bookmarks/reducers.html"> <link rel="import" href="chrome://bookmarks/util.html"> -<script src="chrome://bookmarks/bookmarks_store.js"></script> - -<!-- Old-style store: --> -<!-- TODO(tsergeant): Remove this version of the store once it is unused. --> -<link rel="import" href="chrome://resources/html/polymer.html"> -<link rel="import" href="chrome://bookmarks/router.html"> -<dom-module id="bookmarks-store"> - <template> - <bookmarks-router id="router" search-term="[[searchTerm]]" - selected-id="[[selectedId]]"></bookmarks-router> - </template> - <script src="chrome://bookmarks/store.js"></script> -</dom-module> +<script src="chrome://bookmarks/store.js"></script>
diff --git a/chrome/browser/resources/md_bookmarks/store.js b/chrome/browser/resources/md_bookmarks/store.js index bb4334b..077c6db5 100644 --- a/chrome/browser/resources/md_bookmarks/store.js +++ b/chrome/browser/resources/md_bookmarks/store.js
@@ -1,465 +1,100 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. +// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -var BookmarksStore = Polymer({ - is: 'bookmarks-store', +/** + * @fileoverview A singleton datastore for the Bookmarks page. Page state is + * publicly readable, but can only be modified by dispatching an Action to + * the store. + */ - properties: { - /** @type {BookmarkTreeNode} */ - rootNode: { - type: Object, - notify: true, +cr.define('bookmarks', function() { + /** @constructor */ + function Store() { + /** @type {!BookmarksPageState} */ + this.data_ = bookmarks.util.createEmptyState(); + /** @type {boolean} */ + this.initialized_ = false; + /** @type {!Array<!Action>} */ + this.queuedActions_ = []; + /** @type {!Array<!StoreObserver>} */ + this.observers_ = []; + } + + Store.prototype = { + /** + * @param {!BookmarksPageState} initialState + */ + init: function(initialState) { + this.data_ = initialState; + + this.queuedActions_.forEach(function(action) { + this.reduce_(action); + }.bind(this)); + + this.initialized_ = true; + this.notifyObservers_(this.data_); }, - /** @type {?string} */ - selectedId: { - type: String, - observer: 'updateSelectedDisplay_', - notify: true, + /** @type {!BookmarksPageState} */ + get data() { + return this.data_; }, - searchTerm: { - type: String, - value: '', - observer: 'updateSearchDisplay_', - notify: true, + /** @return {boolean} */ + isInitialized: function() { + return this.initialized_; + }, + + /** @param {!StoreObserver} observer */ + addObserver: function(observer) { + this.observers_.push(observer); + }, + + /** @param {!StoreObserver} observer */ + removeObserver: function(observer) { + var index = this.observers_.indexOf(observer); + this.observers_.splice(index, 1); }, /** - * This updates to either the result of a search or the contents of the - * selected folder. 
- * @type {Array<BookmarkTreeNode>} + * Transition to a new UI state based on the supplied |action|, and notify + * observers of the change. If the Store has not yet been initialized, the + * action will be queued and performed upon initialization. + * @param {Action} action */ - displayedList: { - type: Array, - notify: true, - readOnly: true, + handleAction: function(action) { + if (!this.initialized_) { + this.queuedActions_.push(action); + return; + } + + this.reduce_(action); + this.notifyObservers_(this.data_); }, - /** @type {Object<?string, !BookmarkTreeNode>} */ - idToNodeMap_: Object, + /** + * @param {Action} action + * @private + */ + reduce_: function(action) { + this.data_ = bookmarks.reduceAction(this.data_, action); + }, - /** @type {?number} */ - anchorIndex_: Number, + /** + * @param {!BookmarksPageState} state + * @private + */ + notifyObservers_: function(state) { + this.observers_.forEach(function(o) { + o.onStateChanged(state); + }); + }, + }; - /** @type {Set<string>} */ - searchResultSet_: Object, - }, + cr.addSingletonGetter(Store); - /** @private {Object} */ - documentListeners_: null, - - /** @override */ - attached: function() { - this.documentListeners_ = { - 'folder-open-changed': this.onFolderOpenChanged_.bind(this), - 'search-term-changed': this.onSearchTermChanged_.bind(this), - 'select-item': this.onItemSelected_.bind(this), - 'selected-folder-changed': this.onSelectedFolderChanged_.bind(this), - }; - for (var event in this.documentListeners_) - document.addEventListener(event, this.documentListeners_[event]); - }, - - /** @override */ - detached: function() { - for (var event in this.documentListeners_) - document.removeEventListener(event, this.documentListeners_[event]); - }, - - /** - * Initializes the store with data from the bookmarks API. - * Called by app on attached. 
- */ - initializeStore: function() { - chrome.bookmarks.getTree(function(results) { - this.setupStore_(results[0]); - }.bind(this)); - // Attach bookmarks API listeners. - chrome.bookmarks.onRemoved.addListener(this.onBookmarkRemoved_.bind(this)); - chrome.bookmarks.onChanged.addListener(this.onBookmarkChanged_.bind(this)); - chrome.bookmarks.onImportBegan.addListener(this.onImportBegan_.bind(this)); - chrome.bookmarks.onImportEnded.addListener(this.onImportEnded_.bind(this)); - }, - - ////////////////////////////////////////////////////////////////////////////// - // bookmarks-store, private: - - /** - * @param {BookmarkTreeNode} rootNode - * @private - */ - setupStore_: function(rootNode) { - this.rootNode = rootNode; - this.idToNodeMap_ = {}; - this.rootNode.path = 'rootNode'; - BookmarksStore.generatePaths(rootNode, 0); - BookmarksStore.initNodes(this.rootNode, this.idToNodeMap_); - - // Initialize the store's fields from the router. - if (this.$.router.searchTerm) - this.searchTerm = this.$.router.searchTerm; - else - this.fire('selected-folder-changed', this.$.router.selectedId); - }, - - /** @private */ - deselectFolders_: function() { - this.unlinkPaths('displayedList'); - this.set( - this.idToNodeMap_[this.selectedId].path + '.isSelectedFolder', false); - this.selectedId = null; - }, - - /** - * @param {BookmarkTreeNode} folder - * @private - * @return {boolean} - */ - isAncestorOfSelected_: function(folder) { - if (!this.selectedId) - return false; - - var selectedNode = this.idToNodeMap_[this.selectedId]; - return selectedNode.path.startsWith(folder.path); - }, - - /** @private */ - updateSearchDisplay_: function() { - if (!this.rootNode) - return; - - if (!this.searchTerm) { - this.fire('selected-folder-changed', this.rootNode.children[0].id); - } else { - chrome.bookmarks.search(this.searchTerm, function(results) { - this.anchorIndex_ = null; - this.clearSelectedItems_(); - this.searchResultSet_ = new Set(); - - if (this.selectedId) - 
this.deselectFolders_(); - - this.setupSearchResults_(results); - }.bind(this)); - } - }, - - /** @private */ - updateSelectedDisplay_: function() { - // Don't change to the selected display if ID was cleared. - if (!this.selectedId) - return; - - this.clearSelectedItems_(); - this.anchorIndex_ = null; - - var selectedNode = this.idToNodeMap_[this.selectedId]; - this.linkPaths('displayedList', selectedNode.path + '.children'); - this._setDisplayedList( - /** @type {Array<BookmarkTreeNode>} */ (selectedNode.children)); - }, - - /** - * Remove all descendants of a given node from the map. - * @param {string} id - * @private - */ - removeDescendantsFromMap_: function(id) { - var node = this.idToNodeMap_[id]; - if (!node) - return; - - if (node.children) { - for (var i = 0; i < node.children.length; i++) - this.removeDescendantsFromMap_(node.children[i].id); - } - delete this.idToNodeMap_[id]; - }, - - /** - * Remove all selected items in the list. - * @private - */ - clearSelectedItems_: function() { - if (!this.displayedList) - return; - - for (var i = 0; i < this.displayedList.length; i++) { - if (!this.displayedList[i].isSelectedItem) - continue; - - this.set('displayedList.#' + i + '.isSelectedItem', false); - } - }, - - /** - * Return the index in the search result of an item. - * @param {BookmarkTreeNode} item - * @return {number} - * @private - */ - getIndexInList_: function(item) { - return this.searchTerm ? item.searchResultIndex : item.index; - }, - - /** - * @param {string} id - * @return {boolean} - * @private - */ - isInDisplayedList_: function(id) { - return this.searchTerm ? this.searchResultSet_.has(id) : - this.idToNodeMap_[id].parentId == this.selectedId; - }, - - /** - * Initializes the search results returned by the API as follows: - * - Populates |searchResultSet_| with a mapping of all result ids to - * their corresponding result. - * - Sets up the |searchResultIndex|. 
- * @param {Array<BookmarkTreeNode>} results - * @private - */ - setupSearchResults_: function(results) { - for (var i = 0; i < results.length; i++) { - results[i].searchResultIndex = i; - results[i].isSelectedItem = false; - this.searchResultSet_.add(results[i].id); - } - - this._setDisplayedList(results); - }, - - /** - * Select multiple items based on |anchorIndex_| and the selected - * item. If |anchorIndex_| is not set, single select the item. - * @param {BookmarkTreeNode} item - * @private - */ - selectRange_: function(item) { - var startIndex, endIndex; - if (this.anchorIndex_ == null) { - this.anchorIndex_ = this.getIndexInList_(item); - startIndex = this.anchorIndex_; - endIndex = this.anchorIndex_; - } else { - var selectedIndex = this.getIndexInList_(item); - startIndex = Math.min(this.anchorIndex_, selectedIndex); - endIndex = Math.max(this.anchorIndex_, selectedIndex); - } - for (var i = startIndex; i <= endIndex; i++) - this.set('displayedList.#' + i + '.isSelectedItem', true); - }, - - /** - * Selects a single item in the displayedList. - * @param {BookmarkTreeNode} item - * @private - */ - selectItem_: function(item) { - this.anchorIndex_ = this.getIndexInList_(item); - this.set('displayedList.#' + this.anchorIndex_ + '.isSelectedItem', true); - }, - - ////////////////////////////////////////////////////////////////////////////// - // bookmarks-store, bookmarks API event listeners: - - /** - * Callback for when a bookmark node is removed. - * If a folder is selected or is an ancestor of a selected folder, the parent - * of the removed folder will be selected. - * @param {string} id The id of the removed bookmark node. 
- * @param {!{index: number, - * parentId: string, - * node: BookmarkTreeNode}} removeInfo - */ - onBookmarkRemoved_: function(id, removeInfo) { - chrome.bookmarks.getSubTree(removeInfo.parentId, function(parentNodes) { - var parentNode = parentNodes[0]; - var isAncestor = this.isAncestorOfSelected_(this.idToNodeMap_[id]); - var wasInDisplayedList = this.isInDisplayedList_(id); - - // Refresh the parent node's data from the backend as its children's - // indexes will have changed and Polymer doesn't update them. - this.removeDescendantsFromMap_(id); - parentNode.path = this.idToNodeMap_[parentNode.id].path; - BookmarksStore.generatePaths(parentNode, 0); - BookmarksStore.initNodes(parentNode, this.idToNodeMap_); - this.set(parentNode.path, parentNode); - - // Updates selectedId if the removed node is an ancestor of the current - // selected node. - if (isAncestor) - this.fire('selected-folder-changed', removeInfo.parentId); - - // Only update the displayedList if the removed node is in the - // displayedList. - if (!wasInDisplayedList) - return; - - this.anchorIndex_ = null; - - // Update the currently displayed list. - if (this.searchTerm) { - this.updateSearchDisplay_(); - } else { - if (!isAncestor) - this.fire('selected-folder-changed', this.selectedId); - - this._setDisplayedList(parentNode.children); - } - }.bind(this)); - }, - - /** - * Called when the title of a bookmark changes. - * @param {string} id The id of changed bookmark node. - * @param {!Object} changeInfo - */ - onBookmarkChanged_: function(id, changeInfo) { - if (changeInfo.title) - this.set(this.idToNodeMap_[id].path + '.title', changeInfo.title); - if (changeInfo.url) - this.set(this.idToNodeMap_[id].path + '.url', changeInfo.url); - - if (this.searchTerm) - this.updateSearchDisplay_(); - }, - - /** - * Called when importing bookmark is started. - */ - onImportBegan_: function() { - // TODO(rongjie): pause onCreated once this event is used. 
- }, - - /** - * Called when importing bookmark node is finished. - */ - onImportEnded_: function() { - chrome.bookmarks.getTree(function(results) { - this.setupStore_(results[0]); - this.updateSelectedDisplay_(); - }.bind(this)); - }, - - ////////////////////////////////////////////////////////////////////////////// - // bookmarks-store, bookmarks app event listeners: - - /** - * @param {Event} e - * @private - */ - onSearchTermChanged_: function(e) { - this.searchTerm = /** @type {string} */ (e.detail); - }, - - /** - * Selects the folder specified by the event and deselects the previously - * selected folder. - * @param {CustomEvent} e - * @private - */ - onSelectedFolderChanged_: function(e) { - if (this.searchTerm) - this.searchTerm = ''; - - // Deselect the old folder if defined. - if (this.selectedId && this.idToNodeMap_[this.selectedId]) - this.set( - this.idToNodeMap_[this.selectedId].path + '.isSelectedFolder', false); - - // Check if the selected id is that of a defined folder. - var id = /** @type {string} */ (e.detail); - if (!this.idToNodeMap_[id] || this.idToNodeMap_[id].url) - id = this.rootNode.children[0].id; - - var folder = this.idToNodeMap_[id]; - this.set(folder.path + '.isSelectedFolder', true); - this.selectedId = id; - - if (folder.id == this.rootNode.id) - return; - - var parent = this.idToNodeMap_[/** @type {?string} */ (folder.parentId)]; - while (parent) { - if (!parent.isOpen) { - this.fire('folder-open-changed', { - id: parent.id, - open: true, - }); - } - - parent = this.idToNodeMap_[/** @type {?string} */ (parent.parentId)]; - } - }, - - /** - * Handles events that open and close folders. 
- * @param {CustomEvent} e - * @private - */ - onFolderOpenChanged_: function(e) { - var folder = this.idToNodeMap_[e.detail.id]; - this.set(folder.path + '.isOpen', e.detail.open); - if (!folder.isOpen && this.isAncestorOfSelected_(folder)) - this.fire('selected-folder-changed', folder.id); - }, - - /** - * Selects items according to keyboard behaviours. - * @param {CustomEvent} e - * @private - */ - onItemSelected_: function(e) { - if (!e.detail.add) - this.clearSelectedItems_(); - - if (e.detail.range) - this.selectRange_(e.detail.item); - else - this.selectItem_(e.detail.item); - }, + return { + Store: Store, + }; }); - -//////////////////////////////////////////////////////////////////////////////// -// bookmarks-store, static methods: - -/** - * Stores the path from the store to a node inside the node. - * @param {BookmarkTreeNode} bookmarkNode - * @param {number} startIndex - */ -BookmarksStore.generatePaths = function(bookmarkNode, startIndex) { - if (!bookmarkNode.children) - return; - - for (var i = startIndex; i < bookmarkNode.children.length; i++) { - bookmarkNode.children[i].path = bookmarkNode.path + '.children.#' + i; - BookmarksStore.generatePaths(bookmarkNode.children[i], 0); - } -}; - -/** - * Initializes the nodes in the bookmarks tree as follows: - * - Populates |idToNodeMap_| with a mapping of all node ids to their - * corresponding BookmarkTreeNode. - * - Sets all the nodes to not selected and open by default. - * @param {BookmarkTreeNode} bookmarkNode - * @param {Object=} idToNodeMap - */ -BookmarksStore.initNodes = function(bookmarkNode, idToNodeMap) { - bookmarkNode.isSelectedItem = false; - if (idToNodeMap) - idToNodeMap[bookmarkNode.id] = bookmarkNode; - - if (bookmarkNode.url) - return; - - bookmarkNode.isSelectedFolder = false; - bookmarkNode.isOpen = true; - for (var i = 0; i < bookmarkNode.children.length; i++) - BookmarksStore.initNodes(bookmarkNode.children[i], idToNodeMap); -};
diff --git a/chrome/browser/safe_browsing/incident_reporting/extension_data_collection_unittest.cc b/chrome/browser/safe_browsing/incident_reporting/extension_data_collection_unittest.cc index 07fadbc4..4d9e89d8 100644 --- a/chrome/browser/safe_browsing/incident_reporting/extension_data_collection_unittest.cc +++ b/chrome/browser/safe_browsing/incident_reporting/extension_data_collection_unittest.cc
@@ -100,9 +100,10 @@ extension_prefs_->UpdateExtensionPref( extension_id, "install_time", - new base::Value(base::Int64ToString(install_time.ToInternalValue()))); - extension_prefs_->UpdateExtensionPref(extension_id, "state", - new base::Value(state_value)); + base::MakeUnique<base::Value>( + base::Int64ToString(install_time.ToInternalValue()))); + extension_prefs_->UpdateExtensionPref( + extension_id, "state", base::MakeUnique<base::Value>(state_value)); } void ExtensionTestingProfile::SetInstallSignature(
diff --git a/chrome/browser/signin/account_reconcilor_unittest.cc b/chrome/browser/signin/account_reconcilor_unittest.cc index 8e6c39b..8c9304c 100644 --- a/chrome/browser/signin/account_reconcilor_unittest.cc +++ b/chrome/browser/signin/account_reconcilor_unittest.cc
@@ -85,7 +85,7 @@ } // namespace -class AccountReconcilorTest : public ::testing::TestWithParam<bool> { +class AccountReconcilorTest : public ::testing::Test { public: AccountReconcilorTest(); void SetUp() override; @@ -158,13 +158,6 @@ url_fetcher_factory_(NULL) {} void AccountReconcilorTest::SetUp() { - // If it's a non-parameterized test, or we have a parameter of true, set flag. - if (!::testing::UnitTest::GetInstance()->current_test_info()->value_param() || - GetParam()) { - base::CommandLine::ForCurrentProcess()->AppendSwitch( - switches::kEnableNewProfileManagement); - } - get_check_connection_info_url_ = GaiaUrls::GetInstance()->GetCheckConnectionInfoURLWithSource( GaiaConstants::kChromeSource); @@ -375,7 +368,7 @@ reconcilor->GetState()); } -TEST_P(AccountReconcilorTest, StartReconcileNoop) { +TEST_F(AccountReconcilorTest, StartReconcileNoop) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); @@ -400,7 +393,7 @@ 1); } -TEST_P(AccountReconcilorTest, StartReconcileCookiesDisabled) { +TEST_F(AccountReconcilorTest, StartReconcileCookiesDisabled) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); token_service()->UpdateCredentials(account_id, "refresh_token"); @@ -421,7 +414,7 @@ ASSERT_FALSE(reconcilor->is_reconcile_started_); } -TEST_P(AccountReconcilorTest, StartReconcileContentSettings) { +TEST_F(AccountReconcilorTest, StartReconcileContentSettings) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); token_service()->UpdateCredentials(account_id, "refresh_token"); @@ -441,7 +434,7 @@ ASSERT_TRUE(reconcilor->is_reconcile_started_); } -TEST_P(AccountReconcilorTest, StartReconcileContentSettingsGaiaUrl) { +TEST_F(AccountReconcilorTest, StartReconcileContentSettingsGaiaUrl) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); token_service()->UpdateCredentials(account_id, "refresh_token"); @@ -456,7 +449,7 @@ 
ASSERT_TRUE(reconcilor->is_reconcile_started_); } -TEST_P(AccountReconcilorTest, StartReconcileContentSettingsNonGaiaUrl) { +TEST_F(AccountReconcilorTest, StartReconcileContentSettingsNonGaiaUrl) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); token_service()->UpdateCredentials(account_id, "refresh_token"); @@ -471,7 +464,7 @@ ASSERT_FALSE(reconcilor->is_reconcile_started_); } -TEST_P(AccountReconcilorTest, StartReconcileContentSettingsInvalidPattern) { +TEST_F(AccountReconcilorTest, StartReconcileContentSettingsInvalidPattern) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); token_service()->UpdateCredentials(account_id, "refresh_token"); @@ -496,7 +489,7 @@ // tests makes sure that an email like "Dot.S@hmail.com", as seen by the // token service, will be considered the same as "dots@gmail.com" as returned // by gaia::ParseListAccountsData(). -TEST_P(AccountReconcilorTest, StartReconcileNoopWithDots) { +TEST_F(AccountReconcilorTest, StartReconcileNoopWithDots) { if (account_tracker()->GetMigrationState() != AccountTrackerService::MIGRATION_NOT_STARTED) { return; @@ -520,7 +513,7 @@ 1); } -TEST_P(AccountReconcilorTest, StartReconcileNoopMultiple) { +TEST_F(AccountReconcilorTest, StartReconcileNoopMultiple) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); const std::string account_id2 = @@ -545,7 +538,7 @@ 1); } -TEST_P(AccountReconcilorTest, StartReconcileAddToCookie) { +TEST_F(AccountReconcilorTest, StartReconcileAddToCookie) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); token_service()->UpdateCredentials(account_id, "refresh_token"); @@ -623,7 +616,7 @@ #endif // !defined(OS_CHROMEOS) -TEST_P(AccountReconcilorTest, StartReconcileRemoveFromCookie) { +TEST_F(AccountReconcilorTest, StartReconcileRemoveFromCookie) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); 
token_service()->UpdateCredentials(account_id, "refresh_token"); @@ -652,7 +645,7 @@ "Signin.Reconciler.RemovedFromCookieJar.FirstRun", 1, 1); } -TEST_P(AccountReconcilorTest, StartReconcileAddToCookieTwice) { +TEST_F(AccountReconcilorTest, StartReconcileAddToCookieTwice) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); const std::string account_id2 = @@ -717,7 +710,7 @@ "Signin.Reconciler.RemovedFromCookieJar.SubsequentRun", 0, 1); } -TEST_P(AccountReconcilorTest, StartReconcileBadPrimary) { +TEST_F(AccountReconcilorTest, StartReconcileBadPrimary) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); const std::string account_id2 = @@ -753,7 +746,7 @@ "Signin.Reconciler.RemovedFromCookieJar.FirstRun", 0, 1); } -TEST_P(AccountReconcilorTest, StartReconcileOnlyOnce) { +TEST_F(AccountReconcilorTest, StartReconcileOnlyOnce) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); cookie_manager_service()->SetListAccountsResponseOneAccount( @@ -771,7 +764,7 @@ ASSERT_FALSE(reconcilor->is_reconcile_started_); } -TEST_P(AccountReconcilorTest, StartReconcileWithSessionInfoExpiredDefault) { +TEST_F(AccountReconcilorTest, StartReconcileWithSessionInfoExpiredDefault) { const std::string account_id = ConnectProfileToAccount("12345", "user@gmail.com"); const std::string account_id2 = @@ -908,7 +901,3 @@ ASSERT_FALSE(reconcilor->is_reconcile_started_); ASSERT_FALSE(reconcilor->error_during_last_reconcile_); } - -INSTANTIATE_TEST_CASE_P(AccountReconcilorMaybeEnabled, - AccountReconcilorTest, - testing::Bool());
diff --git a/chrome/browser/sync/test/integration/two_client_app_list_sync_test.cc b/chrome/browser/sync/test/integration/two_client_app_list_sync_test.cc index c8d22d29..fcf9958 100644 --- a/chrome/browser/sync/test/integration/two_client_app_list_sync_test.cc +++ b/chrome/browser/sync/test/integration/two_client_app_list_sync_test.cc
@@ -425,7 +425,7 @@ // Flag Default app in Profile 1. extensions::ExtensionPrefs::Get(GetProfile(1)) ->UpdateExtensionPref(default_app_id, "was_installed_by_default", - new base::Value(true)); + base::MakeUnique<base::Value>(true)); // Remove the default app in Profile 0 and verifier, ensure it was removed // in Profile 1.
diff --git a/chrome/browser/ui/app_list/extension_app_model_builder_unittest.cc b/chrome/browser/ui/app_list/extension_app_model_builder_unittest.cc index 968f669..0072e5f 100644 --- a/chrome/browser/ui/app_list/extension_app_model_builder_unittest.cc +++ b/chrome/browser/ui/app_list/extension_app_model_builder_unittest.cc
@@ -271,8 +271,9 @@ // Creates a corrupted ordinal case. extensions::ExtensionScopedPrefs* scoped_prefs = extensions::ExtensionPrefs::Get(profile_.get()); - scoped_prefs->UpdateExtensionPref(kHostedAppId, "page_ordinal", - new base::Value("a corrupted ordinal")); + scoped_prefs->UpdateExtensionPref( + kHostedAppId, "page_ordinal", + base::MakeUnique<base::Value>("a corrupted ordinal")); // This should not assert or crash. CreateBuilder();
diff --git a/chrome/browser/ui/views/frame/browser_frame_mus.cc b/chrome/browser/ui/views/frame/browser_frame_mus.cc index 7859590..44dbe82 100644 --- a/chrome/browser/ui/views/frame/browser_frame_mus.cc +++ b/chrome/browser/ui/views/frame/browser_frame_mus.cc
@@ -48,8 +48,8 @@ static_cast<int32_t>(ash::mojom::WindowStyle::BROWSER)); #endif std::unique_ptr<views::DesktopWindowTreeHostMus> desktop_window_tree_host = - base::MakeUnique<views::DesktopWindowTreeHostMus>(browser_frame_, this, - &properties); + base::MakeUnique<views::DesktopWindowTreeHostMus>( + browser_frame_, this, cc::FrameSinkId(), &properties); // BrowserNonClientFrameViewMus::OnBoundsChanged() takes care of updating // the insets. desktop_window_tree_host->set_auto_update_client_area(false);
diff --git a/chrome/browser/ui/webui/md_bookmarks/md_bookmarks_ui.cc b/chrome/browser/ui/webui/md_bookmarks/md_bookmarks_ui.cc index e370ea20..620ed65 100644 --- a/chrome/browser/ui/webui/md_bookmarks/md_bookmarks_ui.cc +++ b/chrome/browser/ui/webui/md_bookmarks/md_bookmarks_ui.cc
@@ -73,8 +73,6 @@ source->AddResourcePath("api_listener.js", IDR_MD_BOOKMARKS_API_LISTENER_JS); source->AddResourcePath("app.html", IDR_MD_BOOKMARKS_APP_HTML); source->AddResourcePath("app.js", IDR_MD_BOOKMARKS_APP_JS); - source->AddResourcePath("bookmarks_store.js", - IDR_MD_BOOKMARKS_BOOKMARKS_STORE_JS); source->AddResourcePath("edit_dialog.html", IDR_MD_BOOKMARKS_EDIT_DIALOG_HTML); source->AddResourcePath("edit_dialog.js", IDR_MD_BOOKMARKS_EDIT_DIALOG_JS);
diff --git a/chrome/browser/ui/webui/profile_helper_browsertest.cc b/chrome/browser/ui/webui/profile_helper_browsertest.cc new file mode 100644 index 0000000..aed22c8 --- /dev/null +++ b/chrome/browser/ui/webui/profile_helper_browsertest.cc
@@ -0,0 +1,190 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "base/memory/ptr_util.h" +#include "base/run_loop.h" +#include "base/scoped_observer.h" +#include "chrome/browser/browser_process.h" +#include "chrome/browser/chrome_notification_types.h" +#include "chrome/browser/profiles/profile_attributes_storage.h" +#include "chrome/browser/profiles/profile_manager.h" +#include "chrome/browser/ui/browser.h" +#include "chrome/browser/ui/browser_list.h" +#include "chrome/browser/ui/browser_list_observer.h" +#include "chrome/browser/ui/webui/profile_helper.h" +#include "chrome/test/base/in_process_browser_test.h" +#include "content/public/browser/notification_service.h" +#include "content/public/test/test_utils.h" +#include "content/public/test/test_web_ui.h" + +namespace { + +// A callback that returns control to test code after a new profile is +// initialized. +void UnblockOnProfileCreation(base::RunLoop* run_loop, + Profile* profile, + Profile::CreateStatus status) { + if (status == Profile::CREATE_STATUS_INITIALIZED) + run_loop->Quit(); +} + +Profile* CreateProfile() { + ProfileManager* profile_manager = g_browser_process->profile_manager(); + base::FilePath new_path = profile_manager->GenerateNextProfileDirectoryPath(); + base::RunLoop run_loop; + profile_manager->CreateProfileAsync( + new_path, base::Bind(&UnblockOnProfileCreation, &run_loop), + base::string16(), std::string(), std::string()); + run_loop.Run(); + return profile_manager->GetProfileByPath(new_path); +} + +// An observer that returns control to test code after the browser window +// associated with the profile is activated. 
+class ExpectBrowserActivationForProfile : public chrome::BrowserListObserver { + public: + explicit ExpectBrowserActivationForProfile(Profile* profile) + : profile_(profile), scoped_observer_(this) { + scoped_observer_.Add(BrowserList::GetInstance()); + } + + void Wait() { + loop_.Run(); + } + + protected: + void OnBrowserSetLastActive(Browser* browser) override { + if (browser->profile() == profile_) + loop_.Quit(); + } + + private: + Profile* profile_; + base::RunLoop loop_; + ScopedObserver<BrowserList, chrome::BrowserListObserver> scoped_observer_; +}; + +} // namespace + +using ProfileHelperTest = InProcessBrowserTest; + +IN_PROC_BROWSER_TEST_F(ProfileHelperTest, OpenNewWindowForProfile) { + BrowserList* browser_list = BrowserList::GetInstance(); + + Browser* original_browser = browser(); + Profile* original_profile = original_browser->profile(); + std::unique_ptr<ExpectBrowserActivationForProfile> activation_observer; + + // Sanity checks. + EXPECT_EQ(1u, browser_list->size()); + EXPECT_TRUE(base::ContainsValue(*browser_list, original_browser)); + + // Opening an existing browser profile shouldn't open additional browser windows. + webui::OpenNewWindowForProfile(original_profile); + EXPECT_EQ(1u, browser_list->size()); + EXPECT_EQ(original_browser, browser_list->GetLastActive()); + + // Opening an additional browser will add a new window and activate it. + Profile* additional_profile = CreateProfile(); + activation_observer = + base::MakeUnique<ExpectBrowserActivationForProfile>(additional_profile); + webui::OpenNewWindowForProfile(additional_profile); + EXPECT_EQ(2u, browser_list->size()); + activation_observer->Wait(); + EXPECT_EQ(additional_profile, browser_list->GetLastActive()->profile()); + +// On Macs OpenNewWindowForProfile does not activate existing browser +// while none of the browser windows have focus. BrowserWindowCocoa::Show() has +// the same issue as BrowserWindowCocoa::Activate(), and calls +// BrowserList::SetLastActive() directly.
Not sure if it is a bug or desired +// behaviour. +#if !defined(OS_MACOSX) + // Switch to original browser. Only LastActive should change. + activation_observer = + base::MakeUnique<ExpectBrowserActivationForProfile>(original_profile); + webui::OpenNewWindowForProfile(original_profile); + EXPECT_EQ(2u, browser_list->size()); + activation_observer->Wait(); + EXPECT_EQ(original_profile, browser_list->GetLastActive()->profile()); +#endif +} + +IN_PROC_BROWSER_TEST_F(ProfileHelperTest, DeleteSoleProfile) { + content::TestWebUI web_ui; + Browser* original_browser = browser(); + ProfileAttributesStorage& storage = + g_browser_process->profile_manager()->GetProfileAttributesStorage(); + + BrowserList* browser_list = BrowserList::GetInstance(); + EXPECT_EQ(1u, browser_list->size()); + EXPECT_TRUE(base::ContainsValue(*browser_list, original_browser)); + EXPECT_EQ(1u, storage.GetNumberOfProfiles()); + + // Original browser will be closed, and browser with the new profile created. + content::WindowedNotificationObserver open_observer( + chrome::NOTIFICATION_BROWSER_OPENED, + content::NotificationService::AllSources()); + content::WindowedNotificationObserver close_observer( + chrome::NOTIFICATION_BROWSER_CLOSED, content::Source<Browser>(browser())); + webui::DeleteProfileAtPath(original_browser->profile()->GetPath(), &web_ui, + ProfileMetrics::DELETE_PROFILE_SETTINGS); + open_observer.Wait(); + close_observer.Wait(); + + EXPECT_EQ(1u, browser_list->size()); + EXPECT_FALSE(base::ContainsValue(*browser_list, original_browser)); + EXPECT_EQ(1u, storage.GetNumberOfProfiles()); +} + +IN_PROC_BROWSER_TEST_F(ProfileHelperTest, DeleteActiveProfile) { + content::TestWebUI web_ui; + Browser* original_browser = browser(); + ProfileAttributesStorage& storage = + g_browser_process->profile_manager()->GetProfileAttributesStorage(); + + BrowserList* browser_list = BrowserList::GetInstance(); + EXPECT_EQ(1u, browser_list->size()); + EXPECT_TRUE(base::ContainsValue(*browser_list, 
original_browser)); + EXPECT_EQ(1u, storage.GetNumberOfProfiles()); + + Profile* additional_profile = CreateProfile(); + EXPECT_EQ(2u, storage.GetNumberOfProfiles()); + + // Original browser will be closed, and browser with the new profile created. + content::WindowedNotificationObserver open_observer( + chrome::NOTIFICATION_BROWSER_OPENED, + content::NotificationService::AllSources()); + content::WindowedNotificationObserver close_observer( + chrome::NOTIFICATION_BROWSER_CLOSED, content::Source<Browser>(browser())); + webui::DeleteProfileAtPath(original_browser->profile()->GetPath(), &web_ui, + ProfileMetrics::DELETE_PROFILE_SETTINGS); + open_observer.Wait(); + close_observer.Wait(); + + EXPECT_EQ(1u, browser_list->size()); + EXPECT_EQ(additional_profile, browser_list->get(0)->profile()); + EXPECT_EQ(1u, storage.GetNumberOfProfiles()); +} + +IN_PROC_BROWSER_TEST_F(ProfileHelperTest, DeleteInactiveProfile) { + content::TestWebUI web_ui; + Browser* original_browser = browser(); + ProfileAttributesStorage& storage = + g_browser_process->profile_manager()->GetProfileAttributesStorage(); + + BrowserList* browser_list = BrowserList::GetInstance(); + EXPECT_EQ(1u, browser_list->size()); + EXPECT_TRUE(base::ContainsValue(*browser_list, original_browser)); + EXPECT_EQ(1u, storage.GetNumberOfProfiles()); + + Profile* additional_profile = CreateProfile(); + EXPECT_EQ(2u, storage.GetNumberOfProfiles()); + + webui::DeleteProfileAtPath(additional_profile->GetPath(), &web_ui, + ProfileMetrics::DELETE_PROFILE_SETTINGS); + + EXPECT_EQ(1u, browser_list->size()); + EXPECT_TRUE(base::ContainsValue(*browser_list, original_browser)); + EXPECT_EQ(1u, storage.GetNumberOfProfiles()); +}
diff --git a/chrome/test/BUILD.gn b/chrome/test/BUILD.gn index d465eb0..78a3cff 100644 --- a/chrome/test/BUILD.gn +++ b/chrome/test/BUILD.gn
@@ -1835,6 +1835,7 @@ "../browser/ui/webui/policy_ui_browsertest.cc", "../browser/ui/webui/prefs_internals_browsertest.cc", "../browser/ui/webui/print_preview/print_preview_ui_browsertest.cc", + "../browser/ui/webui/profile_helper_browsertest.cc", "../browser/ui/webui/set_as_default_browser_ui_browsertest_win.cc", "../browser/ui/webui/settings/md_settings_ui_browsertest.cc", "../browser/ui/webui/signin/inline_login_ui_browsertest.cc", @@ -2379,6 +2380,7 @@ # inline login UI is disabled on chromeos "../browser/ui/views/sync/profile_signin_confirmation_dialog_views_browsertest.cc", + "../browser/ui/webui/profile_helper_browsertest.cc", "../browser/ui/webui/signin/inline_login_ui_browsertest.cc", # chromeos does not use the desktop user manager
diff --git a/chrome/test/data/extensions/api_test/native_bindings/extension/background.js b/chrome/test/data/extensions/api_test/native_bindings/extension/background.js index 14829480..4312809 100644 --- a/chrome/test/data/extensions/api_test/native_bindings/extension/background.js +++ b/chrome/test/data/extensions/api_test/native_bindings/extension/background.js
@@ -206,47 +206,6 @@ chrome.test.succeed(); }); }, - function testWebNavigationAndFilteredEvents() { - // Tests unfiltered events, which can be exercised with the webNavigation - // API. - var unfiltered = new Promise((resolve, reject) => { - var sawSimple1 = false; - var sawSimple2 = false; - chrome.webNavigation.onBeforeNavigate.addListener( - function listener(details) { - if (details.url.indexOf('simple.html') != -1) - sawSimple1 = true; - else if (details.url.indexOf('simple2.html') != -1) - sawSimple2 = true; - else - chrome.test.fail(details.url); - - if (sawSimple1 && sawSimple2) { - chrome.webNavigation.onBeforeNavigate.removeListener(listener); - resolve(); - } - }); - }); - - var filtered = new Promise((resolve, reject) => { - chrome.webNavigation.onBeforeNavigate.addListener( - function listener(details) { - chrome.test.assertTrue(details.url.indexOf('simple2.html') != -1, - details.url); - chrome.webNavigation.onBeforeNavigate.removeListener(listener); - resolve(); - }, {url: [{pathContains: 'simple2.html'}]}); - }); - - var url1 = - 'http://example.com:' + portNumber + '/native_bindings/simple.html'; - var url2 = - 'http://example.com:' + portNumber + '/native_bindings/simple2.html'; - chrome.tabs.create({url: url1}); - chrome.tabs.create({url: url2}); - - Promise.all([unfiltered, filtered]).then(() => { chrome.test.succeed(); }); - }, ]; chrome.test.getConfig(config => {
diff --git a/chrome/test/data/extensions/api_test/native_bindings/extension/manifest.json b/chrome/test/data/extensions/api_test/native_bindings/extension/manifest.json index af843be..6c0391e 100644 --- a/chrome/test/data/extensions/api_test/native_bindings/extension/manifest.json +++ b/chrome/test/data/extensions/api_test/native_bindings/extension/manifest.json
@@ -5,7 +5,7 @@ "manifest_version": 2, "version": "0.1", "permissions": ["idle", "tabs", "cast.streaming", "*://example.com:*/*", - "storage", "privacy", "webNavigation"], + "storage", "privacy"], "background": { "persistent": false, "page": "background.html"
diff --git a/chrome/test/data/extensions/api_test/native_bindings/simple2.html b/chrome/test/data/extensions/api_test/native_bindings/simple2.html deleted file mode 100644 index 6178cae..0000000 --- a/chrome/test/data/extensions/api_test/native_bindings/simple2.html +++ /dev/null
@@ -1,6 +0,0 @@ -<!doctype html> -<html> -<body> -Simple 2 -</body> -</html>
diff --git a/chrome/test/data/webui/md_bookmarks/md_bookmarks_browsertest.js b/chrome/test/data/webui/md_bookmarks/md_bookmarks_browsertest.js index 8205db5..a1d8553 100644 --- a/chrome/test/data/webui/md_bookmarks/md_bookmarks_browsertest.js +++ b/chrome/test/data/webui/md_bookmarks/md_bookmarks_browsertest.js
@@ -27,34 +27,6 @@ ]), }; -function MaterialBookmarksStoreTest() {} - -MaterialBookmarksStoreTest.prototype = { - __proto__: MaterialBookmarksBrowserTest.prototype, - - extraLibraries: MaterialBookmarksBrowserTest.prototype.extraLibraries.concat([ - 'store_test.js', - ]), -}; - -TEST_F('MaterialBookmarksStoreTest', 'All', function() { - mocha.run(); -}); - -function MaterialBookmarksSidebarTest() {} - -MaterialBookmarksSidebarTest.prototype = { - __proto__: MaterialBookmarksBrowserTest.prototype, - - extraLibraries: MaterialBookmarksBrowserTest.prototype.extraLibraries.concat([ - 'sidebar_test.js', - ]), -}; - -TEST_F('MaterialBookmarksSidebarTest', 'All', function() { - mocha.run(); -}); - function MaterialBookmarksEditDialogTest() {} MaterialBookmarksEditDialogTest.prototype = { @@ -143,6 +115,20 @@ mocha.grep('URL preload').run(); }); +function MaterialBookmarksSidebarTest() {} + +MaterialBookmarksSidebarTest.prototype = { + __proto__: MaterialBookmarksBrowserTest.prototype, + + extraLibraries: MaterialBookmarksBrowserTest.prototype.extraLibraries.concat([ + 'sidebar_test.js', + ]), +}; + +TEST_F('MaterialBookmarksSidebarTest', 'All', function() { + mocha.run(); +}); + function MaterialBookmarksStoreClientTest() {} MaterialBookmarksStoreClientTest.prototype = {
diff --git a/chrome/test/data/webui/md_bookmarks/store_test.js b/chrome/test/data/webui/md_bookmarks/store_test.js deleted file mode 100644 index 9d08151..0000000 --- a/chrome/test/data/webui/md_bookmarks/store_test.js +++ /dev/null
@@ -1,442 +0,0 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -suite('<bookmarks-store>', function() { - var store; - var TEST_TREE; - - function replaceStore() { - store = document.createElement('bookmarks-store'); - replaceBody(store); - store.setupStore_(TEST_TREE); - } - - function navigateTo(route) { - window.history.replaceState({}, '', route); - window.dispatchEvent(new CustomEvent('location-changed')); - } - - /** - * Overrides the chrome.bookmarks.search to pass results into the callback. - * @param {Array} results - */ - function overrideBookmarksSearch(results) { - chrome.bookmarks.search = function(searchTerm, callback) { - callback(results); - }; - } - - /** - * Overrides the chrome.bookmarks.getSubTree to pass results into the - * callback. - * @param {Array} results - */ - function overrideBookmarksGetSubTree(results) { - chrome.bookmarks.getSubTree = function(parentId, callback) { - callback(results); - }; - } - - setup(function() { - TEST_TREE = createFolder('0', [ - createFolder( - '1', - [ - createItem('2', {url: 'link2'}), - createFolder('3', []), - createItem('6', {url: 'link4'}), - createItem('7', {url: 'link5'}), - ]), - createItem('4', {url: 'link4'}), - createItem('5', {url: 'link5'}), - createFolder('8', []), - ]); - - replaceStore(); - }); - - teardown(function() { - // Clean up anything left in URL. 
- navigateTo('/'); - }); - - ////////////////////////////////////////////////////////////////////////////// - // store initialization tests: - - test('initNodes inserts nodes into idToNodeMap', function() { - assertEquals(TEST_TREE, store.idToNodeMap_['0']); - assertEquals(TEST_TREE.children[0], store.idToNodeMap_['1']); - assertEquals(TEST_TREE.children[0].children[0], store.idToNodeMap_['2']); - assertEquals(TEST_TREE.children[0].children[1], store.idToNodeMap_['3']); - assertEquals(TEST_TREE.children[1], store.idToNodeMap_['4']); - assertEquals(TEST_TREE.children[2], store.idToNodeMap_['5']); - }); - - test('correct paths generated for nodes', function() { - var TEST_PATHS = { - '0': 'rootNode', - '1': 'rootNode.children.#0', - '2': 'rootNode.children.#0.children.#0', - '3': 'rootNode.children.#0.children.#1', - '4': 'rootNode.children.#1', - '5': 'rootNode.children.#2', - '6': 'rootNode.children.#0.children.#2', - '7': 'rootNode.children.#0.children.#3', - '8': 'rootNode.children.#3', - }; - - for (var id in store.idToNodeMap_) - assertEquals(TEST_PATHS[id], store.idToNodeMap_[id].path); - }); - - ////////////////////////////////////////////////////////////////////////////// - // editing bookmarks tree tests: - - test('store updates on selected event', function() { - // First child of root is selected by default. - assertEquals('1', store.selectedId); - assertTrue(store.idToNodeMap_['1'].isSelectedFolder); - - // Selecting a selected folder doesn't deselect it. - store.fire('selected-folder-changed', '1'); - assertEquals('1', store.selectedId); - assertTrue(store.idToNodeMap_['1'].isSelectedFolder); - - // Select a deeply nested descendant. - store.fire('selected-folder-changed', '3'); - assertEquals('3', store.selectedId); - assertTrue(store.idToNodeMap_['3'].isSelectedFolder); - assertFalse(store.idToNodeMap_['1'].isSelectedFolder); - - // Select a folder in separate subtree. 
- store.fire('selected-folder-changed', '8'); - assertEquals('8', store.selectedId); - assertTrue(store.idToNodeMap_['8'].isSelectedFolder); - assertFalse(store.idToNodeMap_['3'].isSelectedFolder); - }); - - test('store updates on open and close', function() { - // All folders are open by default. - for (var id in store.idToNodeMap_) { - if (store.idToNodeMap_[id].url) - continue; - - assertTrue(store.idToNodeMap_[id].isOpen); - } - - // Closing a folder doesn't close any descendants. - store.fire('folder-open-changed', {id: '1', open: false}); - assertFalse(store.idToNodeMap_['1'].isOpen); - assertTrue(store.idToNodeMap_['3'].isOpen); - store.fire('folder-open-changed', {id: '1', open: true}); - - // Closing an ancestor folder of a selected folder selects the ancestor. - store.fire('selected-folder-changed', '3'); - store.fire('folder-open-changed', {id: '1', open: false}); - assertFalse(store.idToNodeMap_['1'].isOpen); - assertEquals('1', store.selectedId); - assertTrue(store.idToNodeMap_['1'].isSelectedFolder); - assertFalse(store.idToNodeMap_['3'].isSelectedFolder); - }); - - test('parent folder opens when descendant folder is selected', function() { - store.idToNodeMap_['0'].isOpen = false; - store.idToNodeMap_['1'].isOpen = false; - store.idToNodeMap_['3'].isOpen = false; - store.fire('selected-folder-changed', '3'); - assertTrue(store.idToNodeMap_['0'].isOpen); - assertTrue(store.idToNodeMap_['1'].isOpen); - assertFalse(store.idToNodeMap_['3'].isOpen); - }); - - test('deleting a node updates the tree', function() { - removeChild(TEST_TREE, 1); - overrideBookmarksGetSubTree([TEST_TREE]); - // Remove an empty folder/bookmark. - store.onBookmarkRemoved_('4', {parentId: '0', index: 1}); - - // Check the tree is correct. - assertEquals('5', store.rootNode.children[1].id); - - // idToNodeMap_ has been updated. - assertEquals(undefined, store.idToNodeMap_['4']); - assertEquals(store.rootNode.children[1], store.idToNodeMap_['5']); - - // Paths have been updated. 
- var TEST_PATHS = { - '0': 'rootNode', - '1': 'rootNode.children.#0', - '2': 'rootNode.children.#0.children.#0', - '3': 'rootNode.children.#0.children.#1', - '5': 'rootNode.children.#1', - '6': 'rootNode.children.#0.children.#2', - '7': 'rootNode.children.#0.children.#3', - '8': 'rootNode.children.#2', - }; - - for (var id in store.idToNodeMap_) - assertEquals(TEST_PATHS[id], store.idToNodeMap_[id].path); - - // Remove a folder with children. - removeChild(TEST_TREE, 0); - overrideBookmarksGetSubTree([TEST_TREE]); - - store.onBookmarkRemoved_('1', {parentId: '0', index: '0'}); - - // Check the tree is correct. - assertEquals('5', store.rootNode.children[0].id); - assertEquals('8', store.rootNode.children[1].id); - - // idToNodeMap_ has been updated. - assertEquals(undefined, store.idToNodeMap_['1']); - assertEquals(undefined, store.idToNodeMap_['2']); - assertEquals(undefined, store.idToNodeMap_['3']); - assertEquals(undefined, store.idToNodeMap_['4']); - assertEquals(store.rootNode.children[0], store.idToNodeMap_['5']); - assertEquals(store.rootNode.children[1], store.idToNodeMap_['8']); - - // Paths have been updated. - TEST_PATHS = { - '0': 'rootNode', - '5': 'rootNode.children.#0', - '8': 'rootNode.children.#1' - }; - - for (var id in store.idToNodeMap_) - assertEquals(TEST_PATHS[id], store.idToNodeMap_[id].path); - }); - - test('selectedId updates after removing a selected folder', function() { - // Selected folder gets removed. - store.selectedId = '8'; - removeChild(TEST_TREE, 3); - overrideBookmarksGetSubTree([TEST_TREE]); - - store.onBookmarkRemoved_('8', {parentId:'0', index:'3'}); - assertTrue(store.idToNodeMap_['0'].isSelectedFolder); - assertEquals('0', store.selectedId); - - // A folder with selected folder in it gets removed. 
- store.selectedId = '3'; - removeChild(TEST_TREE, 0); - overrideBookmarksGetSubTree([TEST_TREE]); - - store.onBookmarkRemoved_('1', {parentId:'0', index:'0'}); - assertTrue(store.idToNodeMap_['0'].isSelectedFolder); - assertEquals('0', store.selectedId); - }); - - test('bookmark gets updated after editing', function() { - // Edit title updates idToNodeMap_ properly. - store.onBookmarkChanged_('4', {'title': 'test'}); - assertEquals('test', store.idToNodeMap_['4'].title); - assertEquals('link4', store.idToNodeMap_['4'].url); - - // Edit url updates idToNodeMap_ properly. - store.onBookmarkChanged_('5', {'url': 'http://www.google.com'}); - assertEquals('', store.idToNodeMap_['5'].title); - assertEquals('http://www.google.com', store.idToNodeMap_['5'].url); - - // Edit url and title updates idToNodeMap_ properly. - store.onBookmarkChanged_('2', { - 'title': 'test', - 'url': 'http://www.google.com', - }); - assertEquals('test', store.idToNodeMap_['2'].title); - assertEquals('http://www.google.com', store.idToNodeMap_['2'].url); - }); - - test('folder gets updated after renaming', function() { - store.onBookmarkChanged_('3', {'title': 'Main Folder'}); - assertEquals('Main Folder', store.idToNodeMap_['3'].title); - assertEquals(undefined, store.idToNodeMap_['3'].url); - }); - - ////////////////////////////////////////////////////////////////////////////// - // search tests: - - test('displayedList updates after searchTerm changes', function() { - var SEARCH_RESULTS = [ - createItem('1', {title: 'cat'}), - createItem('2', {title: 'apple'}), - createItem('3', {title: 'paris'}), - ]; - overrideBookmarksSearch(SEARCH_RESULTS); - - // Search for a non-empty string. - store.searchTerm = 'a'; - assertFalse(store.rootNode.children[0].isSelectedFolder); - assertEquals(null, store.selectedId); - assertEquals(SEARCH_RESULTS, store.displayedList); - - // Clear the searchTerm. 
- store.searchTerm = ''; - var defaultFolder = store.rootNode.children[0]; - assertTrue(defaultFolder.isSelectedFolder); - assertEquals(defaultFolder.id, store.selectedId); - assertEquals(defaultFolder.children, store.displayedList); - - // Search with no bookmarks returned. - overrideBookmarksSearch([]); - store.searchTerm = 'asdf'; - assertEquals(0, store.displayedList.length); - }); - - ////////////////////////////////////////////////////////////////////////////// - // selection tests: - - test('single select selects the correct bookmark', function() { - for (var id in store.idToNodeMap_) - assertFalse(store.idToNodeMap_[id].isSelectedItem); - - store.fire('select-item', {item: store.idToNodeMap_['2']}); - assertDeepEquals( - [true, false, false, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(0, store.anchorIndex_); - - // Select other item will remove the previous selection. - store.fire('select-item', {item: store.idToNodeMap_['3']}); - assertDeepEquals( - [false, true, false, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(1, store.anchorIndex_); - - // Deleting the selected item will unselect everything. - store.selectedId = '1'; - store.fire('select-item', {item: store.idToNodeMap_['2']}); - removeChild(TEST_TREE.children[0], 0); - overrideBookmarksGetSubTree([TEST_TREE.children[0]]); - store.onBookmarkRemoved_('2', {parentId: '1', index: 0}); - assertDeepEquals( - [false, false, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(null, store.anchorIndex_); - - // Changing the selected folder will remove the select status of the - // bookmark. - store.selectedId = '3'; - assertDeepEquals( - [false, false, false], - store.idToNodeMap_['1'].children.map(i => i.isSelectedItem)); - assertEquals(null, store.anchorIndex_); - }); - - test('shift select selects the correct bookmarks', function() { - // When nothing has been selected, it selects a single item. 
- assertEquals(null, store.anchorIndex_); - store.fire('select-item', {item: store.idToNodeMap_['6'], range: true}); - assertDeepEquals( - [false, false, true, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(2, store.anchorIndex_); - - // Select an item below the previous selected item. - store.fire('select-item', {item: store.idToNodeMap_['7'], range: true}); - assertEquals(2, store.anchorIndex_); - assertDeepEquals( - [false, false, true, true], - store.displayedList.map(i => i.isSelectedItem)); - - // Select an item above the previous selected item. - store.fire('select-item', {item: store.idToNodeMap_['2'], range: true}); - assertEquals(2, store.anchorIndex_); - assertDeepEquals( - [true, true, true, false], - store.displayedList.map(i => i.isSelectedItem)); - }); - - test('ctrl select selects the correct bookmarks', function() { - // When nothing has been selected, it selects a single item. - assertEquals(null, store.anchorIndex_); - store.fire('select-item', {item: store.idToNodeMap_['6'], add: true}); - assertDeepEquals( - [false, false, true, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(2, store.anchorIndex_); - - // Select a new item will not deselect the previous item, but will update - // anchorIndex_. 
- store.fire('select-item', {item: store.idToNodeMap_['2'], add: true}); - assertDeepEquals( - [true, false, true, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(0, store.anchorIndex_); - }); - - test('shift + ctrl select selects the correct bookmarks', function() { - store.fire('select-item', {item: store.displayedList[0]}); - store.fire( - 'select-item', {item: store.displayedList[2], add: true, range: false}); - store.fire( - 'select-item', {item: store.displayedList[3], add: true, range: true}); - assertDeepEquals( - [true, false, true, true], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(2, store.anchorIndex_); - }); - - test('selection in search mode', function() { - // Item gets unselected in search. - overrideBookmarksSearch([ - createItem('4', {url: 'link4'}), - createItem('2', {url: 'link2'}), - createItem('5', {url: 'link5'}), - ]); - - store.selectedId = '1'; - store.fire('select-item', {item: store.idToNodeMap_['3']}); - store.searchTerm = 'a'; - assertFalse(store.idToNodeMap_['3'].isSelectedItem); - assertEquals(null, store.anchorIndex_); - - // anchorIndex_ gets updated properly in single select. - store.fire('select-item', {item: store.displayedList[1]}); - assertDeepEquals( - [false, true, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(1, store.anchorIndex_); - - // anchorIndex_ gets updated properly in ctrl select. - store.fire('select-item', {item: store.displayedList[0], add: true}); - assertDeepEquals( - [true, true, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(0, store.anchorIndex_); - - // Deleting the selected item will unselect everything. 
- store.fire('select-item', {item: store.displayedList[1]}); - overrideBookmarksSearch([ - createItem('4', {url: 'link4'}), - createItem('5', {url: 'link5'}), - ]); - removeChild(TEST_TREE.children[0], 0); - overrideBookmarksGetSubTree([TEST_TREE.children[0]]); - - store.onBookmarkRemoved_('2', {parentId: '1', index: 0}); - assertDeepEquals( - [false, false], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(null, store.anchorIndex_); - - // Shift+Ctrl select selects the right items. - overrideBookmarksSearch([ - createItem('4', {url: 'link4'}), - createFolder('3', []), - createItem('5', {url: 'link5'}), - createItem('6', {url: 'link4'}), - createItem('7', {url: 'link5'}), - ]); - store.searchTerm = 'b'; - store.fire('select-item', {item: store.displayedList[0]}); - store.fire( - 'select-item', {item: store.displayedList[2], add: true, range: false}); - store.fire( - 'select-item', {item: store.displayedList[4], add: true, range: true}); - assertDeepEquals( - [true, false, true, true, true], - store.displayedList.map(i => i.isSelectedItem)); - assertEquals(2, store.anchorIndex_); - }); -});
diff --git a/components/reading_list/ios/BUILD.gn b/components/reading_list/ios/BUILD.gn index 683640e..3f1f0a44 100644 --- a/components/reading_list/ios/BUILD.gn +++ b/components/reading_list/ios/BUILD.gn
@@ -43,8 +43,8 @@ sources = [ "offline_url_utils_unittest.cc", "reading_list_entry_unittest.cc", - "reading_list_model_unittest.mm", - "reading_list_store_unittest.mm", + "reading_list_model_unittest.cc", + "reading_list_store_unittest.cc", ] deps = [ ":ios",
diff --git a/components/reading_list/ios/reading_list_entry.cc b/components/reading_list/ios/reading_list_entry.cc index b49791f4..f7f1ff3 100644 --- a/components/reading_list/ios/reading_list_entry.cc +++ b/components/reading_list/ios/reading_list_entry.cc
@@ -12,6 +12,13 @@ #include "components/sync/protocol/reading_list_specifics.pb.h" #include "net/base/backoff_entry_serializer.h" +namespace { +// Converts |time| to the number of microseconds since Jan 1st 1970. +int64_t TimeToUS(const base::Time& time) { + return (time - base::Time::UnixEpoch()).InMicroseconds(); +} +} + // The backoff time is the following: 10min, 10min, 1h, 2h, 2h..., starting // after the first failure. const net::BackoffEntry::Policy ReadingListEntry::kBackoffPolicy = { @@ -39,19 +46,22 @@ true, // Don't use initial delay unless the last request was an error. }; -ReadingListEntry::ReadingListEntry(const GURL& url, const std::string& title) - : ReadingListEntry(url, title, nullptr){}; +ReadingListEntry::ReadingListEntry(const GURL& url, + const std::string& title, + const base::Time& now) + : ReadingListEntry(url, title, now, nullptr){}; ReadingListEntry::ReadingListEntry(const GURL& url, const std::string& title, + const base::Time& now, std::unique_ptr<net::BackoffEntry> backoff) : ReadingListEntry(url, title, UNSEEN, + TimeToUS(now), 0, - 0, - 0, - 0, + TimeToUS(now), + TimeToUS(now), WAITING, base::FilePath(), GURL(), @@ -93,14 +103,9 @@ } else { backoff_ = base::MakeUnique<net::BackoffEntry>(&kBackoffPolicy); } - if (creation_time_us_ == 0) { - DCHECK(update_time_us_ == 0); - DCHECK(update_title_time_us_ == 0); - creation_time_us_ = - (base::Time::Now() - base::Time::UnixEpoch()).InMicroseconds(); - update_time_us_ = creation_time_us_; - update_title_time_us_ = creation_time_us_; - } + DCHECK(creation_time_us_); + DCHECK(update_time_us_); + DCHECK(update_title_time_us_); DCHECK(!url.is_empty()); DCHECK(url.is_valid()); } @@ -181,26 +186,25 @@ return url_ == other.url_; } -void ReadingListEntry::SetTitle(const std::string& title) { +void ReadingListEntry::SetTitle(const std::string& title, + const base::Time& now) { title_ = title; - update_title_time_us_ = - (base::Time::Now() - base::Time::UnixEpoch()).InMicroseconds(); + 
update_title_time_us_ = TimeToUS(now); } -void ReadingListEntry::SetRead(bool read) { +void ReadingListEntry::SetRead(bool read, const base::Time& now) { State previous_state = state_; state_ = read ? READ : UNREAD; if (state_ == previous_state) { return; } if (FirstReadTime() == 0 && read) { - first_read_time_us_ = - (base::Time::Now() - base::Time::UnixEpoch()).InMicroseconds(); + first_read_time_us_ = TimeToUS(now); } if (!(previous_state == UNSEEN && state_ == UNREAD)) { // If changing UNSEEN -> UNREAD, entry is not marked updated to preserve // order in Reading List View. - MarkEntryUpdated(); + MarkEntryUpdated(now); } } @@ -215,14 +219,13 @@ void ReadingListEntry::SetDistilledInfo(const base::FilePath& path, const GURL& distilled_url, int64_t distilation_size, - int64_t distilation_time) { + const base::Time& distilation_time) { DCHECK(!path.empty()); DCHECK(distilled_url.is_valid()); distilled_path_ = path; distilled_state_ = PROCESSED; distilled_url_ = distilled_url; - distillation_time_us_ = distilation_time; - ; + distillation_time_us_ = TimeToUS(distilation_time); distillation_size_ = distilation_size; backoff_->Reset(); failed_download_counter_ = 0; @@ -260,14 +263,14 @@ return first_read_time_us_; } -void ReadingListEntry::MarkEntryUpdated() { - update_time_us_ = - (base::Time::Now() - base::Time::UnixEpoch()).InMicroseconds(); +void ReadingListEntry::MarkEntryUpdated(const base::Time& now) { + update_time_us_ = TimeToUS(now); } // static std::unique_ptr<ReadingListEntry> ReadingListEntry::FromReadingListLocal( - const reading_list::ReadingListLocal& pb_entry) { + const reading_list::ReadingListLocal& pb_entry, + const base::Time& now) { if (!pb_entry.has_url()) { return nullptr; } @@ -283,6 +286,8 @@ int64_t creation_time_us = 0; if (pb_entry.has_creation_time_us()) { creation_time_us = pb_entry.creation_time_us(); + } else { + creation_time_us = (now - base::Time::UnixEpoch()).InMicroseconds(); } int64_t first_read_time_us = 0; @@ -290,12 +295,12 @@ 
first_read_time_us = pb_entry.first_read_time_us(); } - int64_t update_time_us = 0; + int64_t update_time_us = creation_time_us; if (pb_entry.has_update_time_us()) { update_time_us = pb_entry.update_time_us(); } - int64_t update_title_time_us = 0; + int64_t update_title_time_us = creation_time_us; if (pb_entry.has_update_title_time_us()) { update_title_time_us = pb_entry.update_title_time_us(); } @@ -369,7 +374,7 @@ deserializer.Deserialize(nullptr, nullptr)); if (value) { backoff = net::BackoffEntrySerializer::DeserializeFromValue( - *value, &kBackoffPolicy, nullptr, base::Time::Now()); + *value, &kBackoffPolicy, nullptr, now); } } @@ -382,7 +387,8 @@ // static std::unique_ptr<ReadingListEntry> ReadingListEntry::FromReadingListSpecifics( - const sync_pb::ReadingListSpecifics& pb_entry) { + const sync_pb::ReadingListSpecifics& pb_entry, + const base::Time& now) { if (!pb_entry.has_url()) { return nullptr; } @@ -395,7 +401,7 @@ title = pb_entry.title(); } - int64_t creation_time_us = 0; + int64_t creation_time_us = TimeToUS(now); if (pb_entry.has_creation_time_us()) { creation_time_us = pb_entry.creation_time_us(); } @@ -405,12 +411,12 @@ first_read_time_us = pb_entry.first_read_time_us(); } - int64_t update_time_us = 0; + int64_t update_time_us = creation_time_us; if (pb_entry.has_update_time_us()) { update_time_us = pb_entry.update_time_us(); } - int64_t update_title_time_us = 0; + int64_t update_title_time_us = creation_time_us; if (pb_entry.has_update_title_time_us()) { update_title_time_us = pb_entry.update_title_time_us(); } @@ -489,7 +495,7 @@ } std::unique_ptr<reading_list::ReadingListLocal> -ReadingListEntry::AsReadingListLocal() const { +ReadingListEntry::AsReadingListLocal(const base::Time& now) const { std::unique_ptr<reading_list::ReadingListLocal> pb_entry = base::MakeUnique<reading_list::ReadingListLocal>(); @@ -551,8 +557,7 @@ if (backoff_) { std::unique_ptr<base::Value> backoff = - net::BackoffEntrySerializer::SerializeToValue(*backoff_, - 
base::Time::Now()); + net::BackoffEntrySerializer::SerializeToValue(*backoff_, now); std::string output; JSONStringValueSerializer serializer(&output);
diff --git a/components/reading_list/ios/reading_list_entry.h b/components/reading_list/ios/reading_list_entry.h index fc73f39f0..f296a22 100644 --- a/components/reading_list/ios/reading_list_entry.h +++ b/components/reading_list/ios/reading_list_entry.h
@@ -33,11 +33,28 @@ // An entry in the reading list. The URL is a unique identifier for an entry, as // such it should not be empty and is the only thing considered when comparing // entries. +// A word about timestamp usage in this class: +// - The backing store uses int64 values to code timestamps. We use internally +// the same type to avoid useless conversions. This values represent the +// number of micro seconds since Jan 1st 1970. +// - As most timestamp are used to sort entries, operations on int64_t are +// faster than operations on base::Time. So Getter return the int64_t values. +// - However, to ensure all the conversions are done the same way, and because +// the Now time is alway retrieved using base::Time::Now(), all the timestamp +// parameter are passed as base::Time. These parameters are internally +// converted in int64_t. class ReadingListEntry { public: - ReadingListEntry(const GURL& url, const std::string& title); + // Creates a ReadingList entry. |url| and |title| are the main fields of the + // entry. + // |now| is used to fill the |creation_time_us_| and all the update timestamp + // fields. ReadingListEntry(const GURL& url, const std::string& title, + const base::Time& now); + ReadingListEntry(const GURL& url, + const std::string& title, + const base::Time& now, std::unique_ptr<net::BackoffEntry> backoff); ReadingListEntry(ReadingListEntry&& entry); ~ReadingListEntry(); @@ -94,23 +111,28 @@ // microseconds since Jan 1st 1970. int64_t FirstReadTime() const; - // Set the update time to now. - void MarkEntryUpdated(); + // Set the update time to |now|. + void MarkEntryUpdated(const base::Time& now); // Returns a protobuf encoding the content of this ReadingListEntry for local - // storage. - std::unique_ptr<reading_list::ReadingListLocal> AsReadingListLocal() const; + // storage. Use |now| to serialize the backoff_entry. 
+ std::unique_ptr<reading_list::ReadingListLocal> AsReadingListLocal( + const base::Time& now) const; // Returns a protobuf encoding the content of this ReadingListEntry for sync. std::unique_ptr<sync_pb::ReadingListSpecifics> AsReadingListSpecifics() const; // Created a ReadingListEntry from the protobuf format. + // Use |now| to deserialize the backoff_entry. static std::unique_ptr<ReadingListEntry> FromReadingListLocal( - const reading_list::ReadingListLocal& pb_entry); + const reading_list::ReadingListLocal& pb_entry, + const base::Time& now); // Created a ReadingListEntry from the protobuf format. + // If creation time is not set, it will be set to |now|. static std::unique_ptr<ReadingListEntry> FromReadingListSpecifics( - const sync_pb::ReadingListSpecifics& pb_entry); + const sync_pb::ReadingListSpecifics& pb_entry, + const base::Time& now); // Merge |this| and |other| into this. // Local fields are kept from |this|. @@ -129,19 +151,21 @@ bool operator==(const ReadingListEntry& other) const; - // Sets the title. - void SetTitle(const std::string& title); + // Sets |title_| to |title|. Sets |update_title_time_us_| to |now|. + void SetTitle(const std::string& title, const base::Time& now); // Sets the distilled info (offline path, online URL, size and date of the // stored files) about distilled page, switch the state to PROCESSED and reset // the time until the next try. void SetDistilledInfo(const base::FilePath& path, const GURL& distilled_url, int64_t distilation_size, - int64_t distilation_time); + const base::Time& distilation_time); // Sets the state to one of PROCESSING, WILL_RETRY or ERROR. void SetDistilledState(DistillationState distilled_state); // Sets the read state of the entry. Will set the UpdateTime of the entry. - void SetRead(bool read); + // If |first_read_time_us_| is 0 and read is READ, sets |first_read_time_us_| + // to |now|. + void SetRead(bool read, const base::Time& now); private: enum State { UNSEEN, UNREAD, READ };
diff --git a/components/reading_list/ios/reading_list_entry_unittest.cc b/components/reading_list/ios/reading_list_entry_unittest.cc index a4e38b5..018a7de 100644 --- a/components/reading_list/ios/reading_list_entry_unittest.cc +++ b/components/reading_list/ios/reading_list_entry_unittest.cc
@@ -5,7 +5,6 @@ #include "components/reading_list/ios/reading_list_entry.h" #include "base/memory/ptr_util.h" -#include "base/test/ios/wait_util.h" #include "base/test/simple_test_tick_clock.h" #include "components/reading_list/ios/proto/reading_list.pb.h" #include "components/sync/protocol/reading_list_specifics.pb.h" @@ -21,57 +20,74 @@ } // namespace TEST(ReadingListEntry, CompareIgnoreTitle) { - const ReadingListEntry e1(GURL("http://example.com"), "bar"); - const ReadingListEntry e2(GURL("http://example.com"), "foo"); + const ReadingListEntry e1(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); + const ReadingListEntry e2(GURL("http://example.com"), "foo", + base::Time::FromTimeT(20)); EXPECT_EQ(e1, e2); } -TEST(ReadingListEntry, CompareFailureIgnoreTitle) { - const ReadingListEntry e1(GURL("http://example.com"), "bar"); - const ReadingListEntry e2(GURL("http://example.org"), "bar"); +TEST(ReadingListEntry, CompareFailureIgnoreTitleAndCreationTime) { + const ReadingListEntry e1(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); + const ReadingListEntry e2(GURL("http://example.org"), "bar", + base::Time::FromTimeT(10)); EXPECT_FALSE(e1 == e2); } TEST(ReadingListEntry, MovesAreEquals) { - ReadingListEntry e1(GURL("http://example.com"), "bar"); - ReadingListEntry e2(GURL("http://example.com"), "bar"); - ASSERT_EQ(e1, e2); - ASSERT_EQ(e1.Title(), e2.Title()); + ReadingListEntry e1(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); + ReadingListEntry e2(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); + EXPECT_EQ(e1, e2); + EXPECT_EQ(e1.Title(), e2.Title()); + EXPECT_EQ(e1.CreationTime(), e2.CreationTime()); ReadingListEntry e3(std::move(e1)); EXPECT_EQ(e3, e2); EXPECT_EQ(e3.Title(), e2.Title()); + EXPECT_EQ(e3.CreationTime(), e2.CreationTime()); } TEST(ReadingListEntry, ReadState) { - ReadingListEntry e(GURL("http://example.com"), "bar"); + ReadingListEntry e(GURL("http://example.com"), "bar", + 
base::Time::FromTimeT(10)); EXPECT_FALSE(e.HasBeenSeen()); EXPECT_FALSE(e.IsRead()); - e.SetRead(false); + e.SetRead(false, base::Time::FromTimeT(20)); + EXPECT_EQ(e.CreationTime(), 10 * base::Time::kMicrosecondsPerSecond); + EXPECT_EQ(e.UpdateTime(), 10 * base::Time::kMicrosecondsPerSecond); + EXPECT_EQ(e.UpdateTitleTime(), 10 * base::Time::kMicrosecondsPerSecond); EXPECT_TRUE(e.HasBeenSeen()); EXPECT_FALSE(e.IsRead()); - e.SetRead(true); + e.SetRead(true, base::Time::FromTimeT(30)); + EXPECT_EQ(e.CreationTime(), 10 * base::Time::kMicrosecondsPerSecond); + EXPECT_EQ(e.UpdateTime(), 30 * base::Time::kMicrosecondsPerSecond); + EXPECT_EQ(e.UpdateTitleTime(), 10 * base::Time::kMicrosecondsPerSecond); EXPECT_TRUE(e.HasBeenSeen()); EXPECT_TRUE(e.IsRead()); } TEST(ReadingListEntry, UpdateTitle) { - ReadingListEntry e(GURL("http://example.com"), "bar"); - ASSERT_EQ("bar", e.Title()); - ASSERT_EQ(e.CreationTime(), e.UpdateTitleTime()); + ReadingListEntry e(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); + EXPECT_EQ("bar", e.Title()); + // Getters are in microseconds. 
+ EXPECT_EQ(e.CreationTime(), 10 * base::Time::kMicrosecondsPerSecond); + EXPECT_EQ(e.UpdateTitleTime(), 10 * base::Time::kMicrosecondsPerSecond); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(5)); - e.SetTitle("foo"); - EXPECT_GT(e.UpdateTitleTime(), e.CreationTime()); + e.SetTitle("foo", base::Time::FromTimeT(15)); + EXPECT_EQ(e.UpdateTitleTime(), 15 * base::Time::kMicrosecondsPerSecond); EXPECT_EQ("foo", e.Title()); } TEST(ReadingListEntry, DistilledInfo) { - ReadingListEntry e(GURL("http://example.com"), "bar"); + ReadingListEntry e(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); EXPECT_TRUE(e.DistilledPath().empty()); @@ -79,15 +95,17 @@ const GURL distilled_url("http://example.com/distilled"); int64_t size = 50; int64_t time = 100; - e.SetDistilledInfo(distilled_path, distilled_url, size, time); + e.SetDistilledInfo(distilled_path, distilled_url, size, + base::Time::FromTimeT(time)); EXPECT_EQ(distilled_path, e.DistilledPath()); EXPECT_EQ(distilled_url, e.DistilledURL()); EXPECT_EQ(size, e.DistillationSize()); - EXPECT_EQ(e.DistillationTime(), time); + EXPECT_EQ(e.DistillationTime(), time * base::Time::kMicrosecondsPerSecond); } TEST(ReadingListEntry, DistilledState) { - ReadingListEntry e(GURL("http://example.com"), "bar"); + ReadingListEntry e(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); EXPECT_EQ(ReadingListEntry::WAITING, e.DistilledState()); @@ -96,7 +114,8 @@ const base::FilePath distilled_path("distilled/page.html"); const GURL distilled_url("http://example.com/distilled"); - e.SetDistilledInfo(distilled_path, distilled_url, 50, 100); + e.SetDistilledInfo(distilled_path, distilled_url, 50, + base::Time::FromTimeT(100)); EXPECT_EQ(ReadingListEntry::PROCESSED, e.DistilledState()); } @@ -108,11 +127,13 @@ base::MakeUnique<net::BackoffEntry>(&ReadingListEntry::kBackoffPolicy, &clock); - ReadingListEntry e(GURL("http://example.com"), "bar", std::move(backoff)); + ReadingListEntry 
e(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10), std::move(backoff)); - double fuzzing = ReadingListEntry::kBackoffPolicy.jitter_factor; + // Allow twice the jitter as test is not instantaneous. + double fuzzing = 2 * ReadingListEntry::kBackoffPolicy.jitter_factor; - ASSERT_EQ(0, e.TimeUntilNextTry().InSeconds()); + EXPECT_EQ(0, e.TimeUntilNextTry().InSeconds()); // First error. e.SetDistilledState(ReadingListEntry::ERROR); @@ -159,7 +180,8 @@ std::unique_ptr<net::BackoffEntry> backoff = base::MakeUnique<net::BackoffEntry>(&ReadingListEntry::kBackoffPolicy, &clock); - ReadingListEntry e(GURL("http://example.com"), "bar", std::move(backoff)); + ReadingListEntry e(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10), std::move(backoff)); double fuzzing = ReadingListEntry::kBackoffPolicy.jitter_factor; e.SetDistilledState(ReadingListEntry::ERROR); @@ -180,7 +202,8 @@ std::unique_ptr<net::BackoffEntry> backoff = base::MakeUnique<net::BackoffEntry>(&ReadingListEntry::kBackoffPolicy, &clock); - ReadingListEntry e(GURL("http://example.com"), "bar", std::move(backoff)); + ReadingListEntry e(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10), std::move(backoff)); double fuzzing = ReadingListEntry::kBackoffPolicy.jitter_factor; e.SetDistilledState(ReadingListEntry::ERROR); @@ -190,7 +213,8 @@ // Action. const base::FilePath distilled_path("distilled/page.html"); const GURL distilled_url("http://example.com/distilled"); - e.SetDistilledInfo(distilled_path, distilled_url, 50, 100); + e.SetDistilledInfo(distilled_path, distilled_url, 50, + base::Time::FromTimeT(100)); // Test. EXPECT_EQ(0, e.TimeUntilNextTry().InSeconds()); @@ -202,9 +226,10 @@ // Tests that the failed download counter is incremented when the state change // from non-error to error. 
TEST(ReadingListEntry, FailedDownloadCounter) { - ReadingListEntry e(GURL("http://example.com"), "bar"); + ReadingListEntry e(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); - ASSERT_EQ(0, e.FailedDownloadCounter()); + EXPECT_EQ(0, e.FailedDownloadCounter()); e.SetDistilledState(ReadingListEntry::ERROR); EXPECT_EQ(1, e.FailedDownloadCounter()); @@ -223,7 +248,8 @@ // Tests that the reading list entry is correctly encoded to // sync_pb::ReadingListSpecifics. TEST(ReadingListEntry, AsReadingListSpecifics) { - ReadingListEntry entry(GURL("http://example.com/"), "bar"); + ReadingListEntry entry(GURL("http://example.com"), "bar", + base::Time::FromTimeT(10)); int64_t creation_time_us = entry.UpdateTime(); std::unique_ptr<sync_pb::ReadingListSpecifics> pb_entry( @@ -235,8 +261,10 @@ EXPECT_EQ(pb_entry->update_time_us(), entry.UpdateTime()); EXPECT_EQ(pb_entry->status(), sync_pb::ReadingListSpecifics::UNSEEN); - entry.SetRead(true); - EXPECT_NE(entry.UpdateTime(), creation_time_us); + entry.SetRead(true, base::Time::FromTimeT(15)); + // Getters are in microseconds. 
+ EXPECT_EQ(entry.CreationTime(), 10 * base::Time::kMicrosecondsPerSecond); + EXPECT_EQ(entry.UpdateTime(), 15 * base::Time::kMicrosecondsPerSecond); std::unique_ptr<sync_pb::ReadingListSpecifics> updated_pb_entry( entry.AsReadingListSpecifics()); EXPECT_EQ(updated_pb_entry->creation_time_us(), creation_time_us); @@ -254,24 +282,31 @@ pb_entry->set_title("title"); pb_entry->set_creation_time_us(1); pb_entry->set_update_time_us(2); + pb_entry->set_update_title_time_us(3); pb_entry->set_status(sync_pb::ReadingListSpecifics::UNREAD); std::unique_ptr<ReadingListEntry> entry( - ReadingListEntry::FromReadingListSpecifics(*pb_entry)); + ReadingListEntry::FromReadingListSpecifics(*pb_entry, + base::Time::FromTimeT(10))); EXPECT_EQ(entry->URL().spec(), "http://example.com/"); EXPECT_EQ(entry->Title(), "title"); + EXPECT_EQ(entry->CreationTime(), 1); EXPECT_EQ(entry->UpdateTime(), 2); + EXPECT_EQ(entry->UpdateTitleTime(), 3); EXPECT_EQ(entry->FailedDownloadCounter(), 0); } // Tests that the reading list entry is correctly encoded to // reading_list::ReadingListLocal. 
TEST(ReadingListEntry, AsReadingListLocal) { - ReadingListEntry entry(GURL("http://example.com/"), "bar"); + ReadingListEntry entry(GURL("http://example.com/"), "foo", + base::Time::FromTimeT(10)); int64_t creation_time_us = entry.UpdateTime(); + entry.SetTitle("bar", base::Time::FromTimeT(20)); + entry.MarkEntryUpdated(base::Time::FromTimeT(30)); std::unique_ptr<reading_list::ReadingListLocal> pb_entry( - entry.AsReadingListLocal()); + entry.AsReadingListLocal(base::Time::FromTimeT(40))); EXPECT_EQ(pb_entry->entry_id(), "http://example.com/"); EXPECT_EQ(pb_entry->url(), "http://example.com/"); EXPECT_EQ(pb_entry->title(), "bar"); @@ -286,7 +321,7 @@ entry.SetDistilledState(ReadingListEntry::WILL_RETRY); std::unique_ptr<reading_list::ReadingListLocal> will_retry_pb_entry( - entry.AsReadingListLocal()); + entry.AsReadingListLocal(base::Time::FromTimeT(50))); EXPECT_EQ(will_retry_pb_entry->distillation_state(), reading_list::ReadingListLocal::WILL_RETRY); EXPECT_EQ(will_retry_pb_entry->failed_download_counter(), 1); @@ -294,13 +329,14 @@ const base::FilePath distilled_path("distilled/page.html"); const GURL distilled_url("http://example.com/distilled"); int64_t size = 50; - entry.SetDistilledInfo(distilled_path, distilled_url, size, 100); + entry.SetDistilledInfo(distilled_path, distilled_url, size, + base::Time::FromTimeT(100)); - entry.SetRead(true); - entry.MarkEntryUpdated(); + entry.SetRead(true, base::Time::FromTimeT(20)); + entry.MarkEntryUpdated(base::Time::FromTimeT(30)); EXPECT_NE(entry.UpdateTime(), creation_time_us); std::unique_ptr<reading_list::ReadingListLocal> distilled_pb_entry( - entry.AsReadingListLocal()); + entry.AsReadingListLocal(base::Time::FromTimeT(40))); EXPECT_EQ(distilled_pb_entry->creation_time_us(), creation_time_us); EXPECT_EQ(distilled_pb_entry->update_time_us(), entry.UpdateTime()); EXPECT_NE(distilled_pb_entry->backoff(), ""); @@ -317,11 +353,12 @@ // Tests that the reading list entry is correctly parsed from // 
sync_pb::ReadingListLocal. TEST(ReadingListEntry, FromReadingListLocal) { - ReadingListEntry entry(GURL("http://example.com/"), "title"); + ReadingListEntry entry(GURL("http://example.com/"), "title", + base::Time::FromTimeT(10)); entry.SetDistilledState(ReadingListEntry::ERROR); std::unique_ptr<reading_list::ReadingListLocal> pb_entry( - entry.AsReadingListLocal()); + entry.AsReadingListLocal(base::Time::FromTimeT(10))); int64_t now = 12345; pb_entry->set_entry_id("http://example.com/"); @@ -329,6 +366,7 @@ pb_entry->set_title("title"); pb_entry->set_creation_time_us(1); pb_entry->set_update_time_us(2); + pb_entry->set_update_title_time_us(3); pb_entry->set_status(reading_list::ReadingListLocal::UNREAD); pb_entry->set_distillation_state(reading_list::ReadingListLocal::WAITING); pb_entry->set_failed_download_counter(2); @@ -336,16 +374,19 @@ pb_entry->set_distillation_size(50); std::unique_ptr<ReadingListEntry> waiting_entry( - ReadingListEntry::FromReadingListLocal(*pb_entry)); + ReadingListEntry::FromReadingListLocal(*pb_entry, + base::Time::FromTimeT(20))); EXPECT_EQ(waiting_entry->URL().spec(), "http://example.com/"); EXPECT_EQ(waiting_entry->Title(), "title"); EXPECT_EQ(waiting_entry->UpdateTime(), 2); + EXPECT_EQ(waiting_entry->UpdateTitleTime(), 3); EXPECT_EQ(waiting_entry->FailedDownloadCounter(), 2); EXPECT_EQ(waiting_entry->DistilledState(), ReadingListEntry::WAITING); EXPECT_EQ(waiting_entry->DistilledPath(), base::FilePath()); EXPECT_EQ(waiting_entry->DistillationSize(), 50); EXPECT_EQ(waiting_entry->DistillationTime(), now); - double fuzzing = ReadingListEntry::kBackoffPolicy.jitter_factor; + // Allow twice the jitter as test is not instantaneous. 
+ double fuzzing = 2 * ReadingListEntry::kBackoffPolicy.jitter_factor; int nextTry = waiting_entry->TimeUntilNextTry().InMinutes(); EXPECT_NEAR(kFirstBackoff, nextTry, kFirstBackoff * fuzzing); } @@ -354,22 +395,28 @@ // Additional merging tests are done in // ReadingListStoreTest.CompareEntriesForSync TEST(ReadingListEntry, MergeWithEntry) { - ReadingListEntry local_entry(GURL("http://example.com/"), "title"); + ReadingListEntry local_entry(GURL("http://example.com/"), "title", + base::Time::FromTimeT(10)); local_entry.SetDistilledState(ReadingListEntry::ERROR); + local_entry.SetTitle("title updated", base::Time::FromTimeT(30)); int64_t local_update_time_us = local_entry.UpdateTime(); - ReadingListEntry sync_entry(GURL("http://example.com/"), "title2"); + ReadingListEntry sync_entry(GURL("http://example.com/"), "title2", + base::Time::FromTimeT(20)); sync_entry.SetDistilledState(ReadingListEntry::ERROR); int64_t sync_update_time_us = sync_entry.UpdateTime(); EXPECT_NE(local_update_time_us, sync_update_time_us); local_entry.MergeWithEntry(sync_entry); EXPECT_EQ(local_entry.URL().spec(), "http://example.com/"); - EXPECT_EQ(local_entry.Title(), "title2"); + EXPECT_EQ(local_entry.Title(), "title updated"); + EXPECT_EQ(local_entry.UpdateTitleTime(), + 30 * base::Time::kMicrosecondsPerSecond); EXPECT_FALSE(local_entry.HasBeenSeen()); EXPECT_EQ(local_entry.UpdateTime(), sync_update_time_us); EXPECT_EQ(local_entry.FailedDownloadCounter(), 1); EXPECT_EQ(local_entry.DistilledState(), ReadingListEntry::ERROR); - double fuzzing = ReadingListEntry::kBackoffPolicy.jitter_factor; + // Allow twice the jitter as test is not instantaneous. + double fuzzing = 2 * ReadingListEntry::kBackoffPolicy.jitter_factor; int nextTry = local_entry.TimeUntilNextTry().InMinutes(); EXPECT_NEAR(kFirstBackoff, nextTry, kFirstBackoff * fuzzing); }
diff --git a/components/reading_list/ios/reading_list_model.h b/components/reading_list/ios/reading_list_model.h index 42e536f8..50d983fb 100644 --- a/components/reading_list/ios/reading_list_model.h +++ b/components/reading_list/ios/reading_list_model.h
@@ -128,7 +128,7 @@ const base::FilePath& distilled_path, const GURL& distilled_url, int64_t distilation_size, - int64_t distilation_time) = 0; + const base::Time& distilation_time) = 0; // Observer registration methods. The model will remove all observers upon // destruction automatically.
diff --git a/components/reading_list/ios/reading_list_model_impl.cc b/components/reading_list/ios/reading_list_model_impl.cc index 263caa98..aad6432 100644 --- a/components/reading_list/ios/reading_list_model_impl.cc +++ b/components/reading_list/ios/reading_list_model_impl.cc
@@ -8,29 +8,30 @@ #include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/strings/string_util.h" +#include "base/time/clock.h" #include "components/prefs/pref_service.h" #include "components/reading_list/ios/reading_list_model_storage.h" #include "components/reading_list/ios/reading_list_pref_names.h" #include "url/gurl.h" -ReadingListModelImpl::ReadingListModelImpl() - : ReadingListModelImpl(nullptr, nullptr) {} - ReadingListModelImpl::ReadingListModelImpl( std::unique_ptr<ReadingListModelStorage> storage, - PrefService* pref_service) + PrefService* pref_service, + std::unique_ptr<base::Clock> clock) : entries_(base::MakeUnique<ReadingListEntries>()), unread_entry_count_(0), read_entry_count_(0), unseen_entry_count_(0), + clock_(std::move(clock)), pref_service_(pref_service), has_unseen_(false), loaded_(false), weak_ptr_factory_(this) { DCHECK(CalledOnValidThread()); + DCHECK(clock_); if (storage) { storage_layer_ = std::move(storage); - storage_layer_->SetReadingListModel(this, this); + storage_layer_->SetReadingListModel(this, this, clock_.get()); } else { loaded_ = true; } @@ -134,7 +135,7 @@ observer.ReadingListWillUpdateEntry(this, iterator.first); } UpdateEntryStateCountersOnEntryRemoval(entry); - entry.SetRead(false); + entry.SetRead(false, clock_->Now()); UpdateEntryStateCountersOnEntryInsertion(entry); if (storage_layer_) { storage_layer_->SaveEntry(entry); @@ -330,7 +331,7 @@ std::string trimmed_title = base::CollapseWhitespaceASCII(title, false); - ReadingListEntry entry(url, trimmed_title); + ReadingListEntry entry(url, trimmed_title, clock_->Now()); for (auto& observer : observers_) observer.ReadingListWillAddEntry(this, entry); UpdateEntryStateCountersOnEntryInsertion(entry); @@ -364,8 +365,8 @@ observer.ReadingListWillMoveEntry(this, url); } UpdateEntryStateCountersOnEntryRemoval(entry); - entry.SetRead(read); - entry.MarkEntryUpdated(); + entry.SetRead(read, clock_->Now()); + entry.MarkEntryUpdated(clock_->Now()); 
UpdateEntryStateCountersOnEntryInsertion(entry); if (storage_layer_) { @@ -394,7 +395,7 @@ for (ReadingListModelObserver& observer : observers_) { observer.ReadingListWillUpdateEntry(this, url); } - entry.SetTitle(trimmed_title); + entry.SetTitle(trimmed_title, clock_->Now()); if (storage_layer_) { storage_layer_->SaveEntry(entry); } @@ -408,7 +409,7 @@ const base::FilePath& distilled_path, const GURL& distilled_url, int64_t distillation_size, - int64_t distillation_date) { + const base::Time& distillation_date) { DCHECK(CalledOnValidThread()); DCHECK(loaded()); auto iterator = entries_->find(url);
diff --git a/components/reading_list/ios/reading_list_model_impl.h b/components/reading_list/ios/reading_list_model_impl.h index b8beb664..ef6f1ba 100644 --- a/components/reading_list/ios/reading_list_model_impl.h +++ b/components/reading_list/ios/reading_list_model_impl.h
@@ -14,6 +14,10 @@ #include "components/reading_list/ios/reading_list_model_storage.h" #include "components/reading_list/ios/reading_list_store_delegate.h" +namespace base { +class Clock; +} + class PrefService; // Concrete implementation of a reading list model using in memory lists. @@ -24,12 +28,14 @@ using ReadingListEntries = std::map<GURL, ReadingListEntry>; // Initialize a ReadingListModelImpl to load and save data in - // |persistence_layer|. + // |storage_layer|. Passing null to |storage_layer| will create a + // ReadingListModelImpl without persistence. Data will not be persistent + // across sessions. + // |clock| will be used to timestamp all the operations. ReadingListModelImpl(std::unique_ptr<ReadingListModelStorage> storage_layer, - PrefService* pref_service); + PrefService* pref_service, + std::unique_ptr<base::Clock> clock_); - // Initialize a ReadingListModelImpl without persistence. Data will not be - // persistent across sessions. ReadingListModelImpl(); syncer::ModelTypeSyncBridge* GetModelTypeSyncBridge() override; @@ -73,7 +79,7 @@ const base::FilePath& distilled_path, const GURL& distilled_url, int64_t distillation_size, - int64_t distillation_date) override; + const base::Time& distillation_date) override; void SyncAddEntry(std::unique_ptr<ReadingListEntry> entry) override; ReadingListEntry* SyncMergeEntry( @@ -132,11 +138,15 @@ // Set the unseen flag to true. void SetUnseenFlag(); + // |storage_layer_| depends on |clock_| so keep the order. + std::unique_ptr<base::Clock> clock_; std::unique_ptr<ReadingListModelStorage> storage_layer_; PrefService* pref_service_; bool has_unseen_; bool loaded_; + base::WeakPtrFactory<ReadingListModelImpl> weak_ptr_factory_; + DISALLOW_COPY_AND_ASSIGN(ReadingListModelImpl); };
diff --git a/components/reading_list/ios/reading_list_model_storage.h b/components/reading_list/ios/reading_list_model_storage.h index f0ac31a..40a5a540 100644 --- a/components/reading_list/ios/reading_list_model_storage.h +++ b/components/reading_list/ios/reading_list_model_storage.h
@@ -15,6 +15,10 @@ class ReadingListModel; class ReadingListStoreDelegate; +namespace base { +class Clock; +} + namespace syncer { class ModelTypeSyncBridge; } @@ -32,8 +36,11 @@ // Sets the model the Storage is backing. // This will trigger store initalization and load persistent entries. + // Pass the |clock| from the |model| to ensure synchroization when loading + // entries. virtual void SetReadingListModel(ReadingListModel* model, - ReadingListStoreDelegate* delegate) = 0; + ReadingListStoreDelegate* delegate, + base::Clock* clock) = 0; // Starts a transaction. All Save/Remove entry will be delayed until the // transaction is commited.
diff --git a/components/reading_list/ios/reading_list_model_unittest.mm b/components/reading_list/ios/reading_list_model_unittest.cc similarity index 85% rename from components/reading_list/ios/reading_list_model_unittest.mm rename to components/reading_list/ios/reading_list_model_unittest.cc index b8bc5c1..af9e3a0 100644 --- a/components/reading_list/ios/reading_list_model_unittest.mm +++ b/components/reading_list/ios/reading_list_model_unittest.cc
@@ -6,7 +6,7 @@ #include "base/bind.h" #include "base/memory/ptr_util.h" -#import "base/test/ios/wait_util.h" +#include "base/test/simple_test_clock.h" #include "components/reading_list/ios/reading_list_model_impl.h" #include "components/reading_list/ios/reading_list_model_storage.h" #include "components/reading_list/ios/reading_list_store_delegate.h" @@ -19,6 +19,11 @@ const GURL callback_url("http://example.com"); const std::string callback_title("test title"); +base::Time AdvanceAndGetTime(base::SimpleTestClock* clock) { + clock->Advance(base::TimeDelta::FromMilliseconds(10)); + return clock->Now(); +} + class TestReadingListStorageObserver { public: virtual void ReadingListDidSaveEntry() = 0; @@ -27,66 +32,63 @@ class TestReadingListStorage : public ReadingListModelStorage { public: - TestReadingListStorage(TestReadingListStorageObserver* observer) + TestReadingListStorage(TestReadingListStorageObserver* observer, + base::SimpleTestClock* clock) : ReadingListModelStorage( base::Bind(&syncer::ModelTypeChangeProcessor::Create, base::RepeatingClosure()), syncer::READING_LIST), entries_(new ReadingListStoreDelegate::ReadingListEntries()), - observer_(observer) {} + observer_(observer), + clock_(clock) {} void AddSampleEntries() { // Adds timer and interlace read/unread entry creation to avoid having two // entries with the same creation timestamp. 
- ReadingListEntry unread_a(GURL("http://unread_a.com"), "unread_a"); + ReadingListEntry unread_a(GURL("http://unread_a.com"), "unread_a", + AdvanceAndGetTime(clock_)); entries_->insert( std::make_pair(GURL("http://unread_a.com"), std::move(unread_a))); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(5)); - ReadingListEntry read_a(GURL("http://read_a.com"), "read_a"); - read_a.SetRead(true); + ReadingListEntry read_a(GURL("http://read_a.com"), "read_a", + AdvanceAndGetTime(clock_)); + read_a.SetRead(true, AdvanceAndGetTime(clock_)); entries_->insert( std::make_pair(GURL("http://read_a.com"), std::move(read_a))); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(5)); - ReadingListEntry unread_b(GURL("http://unread_b.com"), "unread_b"); + ReadingListEntry unread_b(GURL("http://unread_b.com"), "unread_b", + AdvanceAndGetTime(clock_)); entries_->insert( std::make_pair(GURL("http://unread_b.com"), std::move(unread_b))); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(5)); - ReadingListEntry read_b(GURL("http://read_b.com"), "read_b"); - read_b.SetRead(true); + ReadingListEntry read_b(GURL("http://read_b.com"), "read_b", + AdvanceAndGetTime(clock_)); + read_b.SetRead(true, AdvanceAndGetTime(clock_)); entries_->insert( std::make_pair(GURL("http://read_b.com"), std::move(read_b))); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(5)); - ReadingListEntry unread_c(GURL("http://unread_c.com"), "unread_c"); + ReadingListEntry unread_c(GURL("http://unread_c.com"), "unread_c", + AdvanceAndGetTime(clock_)); entries_->insert( std::make_pair(GURL("http://unread_c.com"), std::move(unread_c))); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(5)); - ReadingListEntry read_c(GURL("http://read_c.com"), "read_c"); - read_c.SetRead(true); + ReadingListEntry read_c(GURL("http://read_c.com"), "read_c", + AdvanceAndGetTime(clock_)); + 
read_c.SetRead(true, AdvanceAndGetTime(clock_)); entries_->insert( std::make_pair(GURL("http://read_c.com"), std::move(read_c))); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(5)); - ReadingListEntry unread_d(GURL("http://unread_d.com"), "unread_d"); + ReadingListEntry unread_d(GURL("http://unread_d.com"), "unread_d", + AdvanceAndGetTime(clock_)); entries_->insert( std::make_pair(GURL("http://unread_d.com"), std::move(unread_d))); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(5)); } void SetReadingListModel(ReadingListModel* model, - ReadingListStoreDelegate* delegate) override { + ReadingListStoreDelegate* delegate, + base::Clock* clock) override { delegate->StoreLoaded(std::move(entries_)); + clock_ = static_cast<base::SimpleTestClock*>(clock); } // Saves or updates an entry. If the entry is not yet in the database, it is @@ -148,22 +150,28 @@ private: std::unique_ptr<ReadingListStoreDelegate::ReadingListEntries> entries_; TestReadingListStorageObserver* observer_; + base::SimpleTestClock* clock_; }; class ReadingListModelTest : public ReadingListModelObserver, public TestReadingListStorageObserver, public testing::Test { public: - ReadingListModelTest() - : callback_called_(false), model_(new ReadingListModelImpl()) { + ReadingListModelTest() : callback_called_(false) { + auto clock = base::MakeUnique<base::SimpleTestClock>(); + clock_ = clock.get(); + model_ = base::MakeUnique<ReadingListModelImpl>(nullptr, nullptr, + std::move(clock)); ClearCounts(); model_->AddObserver(this); } ~ReadingListModelTest() override {} - void SetStorage(std::unique_ptr<TestReadingListStorage> storage) { - model_ = - base::MakeUnique<ReadingListModelImpl>(std::move(storage), nullptr); + void SetStorage(std::unique_ptr<TestReadingListStorage> storage, + std::unique_ptr<base::SimpleTestClock> clock) { + clock_ = clock.get(); + model_ = base::MakeUnique<ReadingListModelImpl>(std::move(storage), nullptr, + 
std::move(clock)); ClearCounts(); model_->AddObserver(this); } @@ -294,6 +302,8 @@ bool callback_called_; std::unique_ptr<ReadingListModelImpl> model_; + // Owned by |model_|; + base::SimpleTestClock* clock_; }; // Tests creating an empty model. @@ -310,9 +320,10 @@ // Tests load model. TEST_F(ReadingListModelTest, ModelLoaded) { ClearCounts(); - auto storage = base::MakeUnique<TestReadingListStorage>(this); + auto clock = base::MakeUnique<base::SimpleTestClock>(); + auto storage = base::MakeUnique<TestReadingListStorage>(this, clock.get()); storage->AddSampleEntries(); - SetStorage(std::move(storage)); + SetStorage(std::move(storage), std::move(clock)); AssertObserverCount(1, 0, 0, 0, 0, 0, 0, 0, 0); std::map<GURL, std::string> loaded_entries; @@ -334,8 +345,9 @@ // Tests adding entry. TEST_F(ReadingListModelTest, AddEntry) { - auto storage = base::MakeUnique<TestReadingListStorage>(this); - SetStorage(std::move(storage)); + auto clock = base::MakeUnique<base::SimpleTestClock>(); + auto storage = base::MakeUnique<TestReadingListStorage>(this, clock.get()); + SetStorage(std::move(storage), std::move(clock)); ClearCounts(); const ReadingListEntry& entry = @@ -360,11 +372,12 @@ // Tests addin entry from sync. TEST_F(ReadingListModelTest, SyncAddEntry) { - auto storage = base::MakeUnique<TestReadingListStorage>(this); - SetStorage(std::move(storage)); - auto entry = - base::MakeUnique<ReadingListEntry>(GURL("http://example.com"), "sample"); - entry->SetRead(true); + auto clock = base::MakeUnique<base::SimpleTestClock>(); + auto storage = base::MakeUnique<TestReadingListStorage>(this, clock.get()); + SetStorage(std::move(storage), std::move(clock)); + auto entry = base::MakeUnique<ReadingListEntry>( + GURL("http://example.com"), "sample", AdvanceAndGetTime(clock_)); + entry->SetRead(true, AdvanceAndGetTime(clock_)); ClearCounts(); model_->SyncAddEntry(std::move(entry)); @@ -377,8 +390,9 @@ // Tests updating entry from sync. 
TEST_F(ReadingListModelTest, SyncMergeEntry) { - auto storage = base::MakeUnique<TestReadingListStorage>(this); - SetStorage(std::move(storage)); + auto clock = base::MakeUnique<base::SimpleTestClock>(); + auto storage = base::MakeUnique<TestReadingListStorage>(this, clock.get()); + SetStorage(std::move(storage), std::move(clock)); model_->AddEntry(GURL("http://example.com"), "sample", reading_list::ADDED_VIA_CURRENT_APP); const base::FilePath distilled_path("distilled/page.html"); @@ -386,16 +400,15 @@ int64_t size = 50; int64_t time = 100; model_->SetEntryDistilledInfo(GURL("http://example.com"), distilled_path, - distilled_url, size, time); + distilled_url, size, + base::Time::FromTimeT(time)); const ReadingListEntry* local_entry = model_->GetEntryByURL(GURL("http://example.com")); int64_t local_update_time = local_entry->UpdateTime(); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(10)); - auto sync_entry = - base::MakeUnique<ReadingListEntry>(GURL("http://example.com"), "sample"); - sync_entry->SetRead(true); + auto sync_entry = base::MakeUnique<ReadingListEntry>( + GURL("http://example.com"), "sample", AdvanceAndGetTime(clock_)); + sync_entry->SetRead(true, AdvanceAndGetTime(clock_)); ASSERT_GT(sync_entry->UpdateTime(), local_update_time); int64_t sync_update_time = sync_entry->UpdateTime(); EXPECT_TRUE(sync_entry->DistilledPath().empty()); @@ -411,13 +424,15 @@ base::FilePath("distilled/page.html")); EXPECT_EQ(merged_entry->UpdateTime(), sync_update_time); EXPECT_EQ(size, merged_entry->DistillationSize()); - EXPECT_EQ(time, merged_entry->DistillationTime()); + EXPECT_EQ(time * base::Time::kMicrosecondsPerSecond, + merged_entry->DistillationTime()); } // Tests deleting entry. 
TEST_F(ReadingListModelTest, RemoveEntryByUrl) { - auto storage = base::MakeUnique<TestReadingListStorage>(this); - SetStorage(std::move(storage)); + auto clock = base::MakeUnique<base::SimpleTestClock>(); + auto storage = base::MakeUnique<TestReadingListStorage>(this, clock.get()); + SetStorage(std::move(storage), std::move(clock)); model_->AddEntry(GURL("http://example.com"), "sample", reading_list::ADDED_VIA_CURRENT_APP); ClearCounts(); @@ -448,8 +463,9 @@ // Tests deleting entry from sync. TEST_F(ReadingListModelTest, RemoveSyncEntryByUrl) { - auto storage = base::MakeUnique<TestReadingListStorage>(this); - SetStorage(std::move(storage)); + auto clock = base::MakeUnique<base::SimpleTestClock>(); + auto storage = base::MakeUnique<TestReadingListStorage>(this, clock.get()); + SetStorage(std::move(storage), std::move(clock)); model_->AddEntry(GURL("http://example.com"), "sample", reading_list::ADDED_VIA_CURRENT_APP); ClearCounts(); @@ -631,13 +647,15 @@ int64_t size = 50; int64_t time = 100; model_->SetEntryDistilledInfo(GURL("http://example.com"), distilled_path, - distilled_url, size, time); + distilled_url, size, + base::Time::FromTimeT(time)); AssertObserverCount(0, 0, 0, 0, 0, 0, 0, 1, 1); EXPECT_EQ(ReadingListEntry::PROCESSED, entry.DistilledState()); EXPECT_EQ(distilled_path, entry.DistilledPath()); EXPECT_EQ(distilled_url, entry.DistilledURL()); EXPECT_EQ(size, entry.DistillationSize()); - EXPECT_EQ(time, entry.DistillationTime()); + EXPECT_EQ(time * base::Time::kMicrosecondsPerSecond, + entry.DistillationTime()); } // Tests setting title on read entry. 
@@ -679,13 +697,15 @@ int64_t size = 50; int64_t time = 100; model_->SetEntryDistilledInfo(GURL("http://example.com"), distilled_path, - distilled_url, size, time); + distilled_url, size, + base::Time::FromTimeT(time)); AssertObserverCount(0, 0, 0, 0, 0, 0, 0, 1, 1); EXPECT_EQ(ReadingListEntry::PROCESSED, entry->DistilledState()); EXPECT_EQ(distilled_path, entry->DistilledPath()); EXPECT_EQ(distilled_url, entry->DistilledURL()); EXPECT_EQ(size, entry->DistillationSize()); - EXPECT_EQ(time, entry->DistillationTime()); + EXPECT_EQ(time * base::Time::kMicrosecondsPerSecond, + entry->DistillationTime()); } // Tests that ReadingListModel calls CallbackModelBeingDeleted when destroyed.
diff --git a/components/reading_list/ios/reading_list_store.cc b/components/reading_list/ios/reading_list_store.cc index 087ad62..c61f85be 100644 --- a/components/reading_list/ios/reading_list_store.cc +++ b/components/reading_list/ios/reading_list_store.cc
@@ -10,6 +10,7 @@ #include "base/bind.h" #include "base/logging.h" #include "base/memory/ptr_util.h" +#include "base/time/clock.h" #include "components/reading_list/ios/proto/reading_list.pb.h" #include "components/reading_list/ios/reading_list_model_impl.h" #include "components/sync/model/entity_change.h" @@ -32,10 +33,12 @@ } void ReadingListStore::SetReadingListModel(ReadingListModel* model, - ReadingListStoreDelegate* delegate) { + ReadingListStoreDelegate* delegate, + base::Clock* clock) { DCHECK(CalledOnValidThread()); model_ = model; delegate_ = delegate; + clock_ = clock; create_store_callback_.Run( base::Bind(&ReadingListStore::OnStoreCreated, base::AsWeakPtr(this))); } @@ -79,7 +82,7 @@ auto token = EnsureBatchCreated(); std::unique_ptr<reading_list::ReadingListLocal> pb_entry = - entry.AsReadingListLocal(); + entry.AsReadingListLocal(clock_->Now()); batch_->WriteData(entry.URL().spec(), pb_entry->SerializeAsString()); @@ -137,7 +140,7 @@ } std::unique_ptr<ReadingListEntry> entry( - ReadingListEntry::FromReadingListLocal(proto)); + ReadingListEntry::FromReadingListLocal(proto, clock_->Now())); if (!entry) { continue; } @@ -218,7 +221,7 @@ kv.second.value().specifics.reading_list(); // Deserialize entry. std::unique_ptr<ReadingListEntry> entry( - ReadingListEntry::FromReadingListSpecifics(specifics)); + ReadingListEntry::FromReadingListSpecifics(specifics, clock_->Now())); const ReadingListEntry* existing_entry = model_->GetEntryByURL(entry->URL()); @@ -227,7 +230,7 @@ // This entry is new. Add it to the store and model. // Convert to local store format and write to store. std::unique_ptr<reading_list::ReadingListLocal> entry_pb = - entry->AsReadingListLocal(); + entry->AsReadingListLocal(clock_->Now()); batch_->WriteData(entry->URL().spec(), entry_pb->SerializeAsString()); // Notify model about updated entry. @@ -239,7 +242,7 @@ // Write to the store. 
std::unique_ptr<reading_list::ReadingListLocal> entry_local_pb = - merged_entry->AsReadingListLocal(); + merged_entry->AsReadingListLocal(clock_->Now()); batch_->WriteData(merged_entry->URL().spec(), entry_local_pb->SerializeAsString()); @@ -305,7 +308,7 @@ const sync_pb::ReadingListSpecifics& specifics = change.data().specifics.reading_list(); std::unique_ptr<ReadingListEntry> entry( - ReadingListEntry::FromReadingListSpecifics(specifics)); + ReadingListEntry::FromReadingListSpecifics(specifics, clock_->Now())); const ReadingListEntry* existing_entry = model_->GetEntryByURL(entry->URL()); @@ -314,7 +317,7 @@ // This entry is new. Add it to the store and model. // Convert to local store format and write to store. std::unique_ptr<reading_list::ReadingListLocal> entry_pb = - entry->AsReadingListLocal(); + entry->AsReadingListLocal(clock_->Now()); batch_->WriteData(entry->URL().spec(), entry_pb->SerializeAsString()); // Notify model about updated entry. @@ -326,7 +329,7 @@ // Write to the store. std::unique_ptr<reading_list::ReadingListLocal> entry_local_pb = - merged_entry->AsReadingListLocal(); + merged_entry->AsReadingListLocal(clock_->Now()); batch_->WriteData(merged_entry->URL().spec(), entry_local_pb->SerializeAsString());
diff --git a/components/reading_list/ios/reading_list_store.h b/components/reading_list/ios/reading_list_store.h index 2b78007..9852d791 100644 --- a/components/reading_list/ios/reading_list_store.h +++ b/components/reading_list/ios/reading_list_store.h
@@ -35,7 +35,8 @@ // ReadingListModelStorage implementation void SetReadingListModel(ReadingListModel* model, - ReadingListStoreDelegate* delegate) override; + ReadingListStoreDelegate* delegate, + base::Clock* clock) override; void SaveEntry(const ReadingListEntry& entry) override; void RemoveEntry(const ReadingListEntry& entry) override; @@ -164,6 +165,10 @@ int pending_transaction_count_; std::unique_ptr<syncer::ModelTypeStore::WriteBatch> batch_; + + base::Clock* clock_; + + DISALLOW_COPY_AND_ASSIGN(ReadingListStore); }; #endif // COMPONENTS_READING_LIST_IOS_READING_LIST_STORE_H_
diff --git a/components/reading_list/ios/reading_list_store_unittest.mm b/components/reading_list/ios/reading_list_store_unittest.cc similarity index 90% rename from components/reading_list/ios/reading_list_store_unittest.mm rename to components/reading_list/ios/reading_list_store_unittest.cc index 9ba369d4..b346c3a1 100644 --- a/components/reading_list/ios/reading_list_store_unittest.mm +++ b/components/reading_list/ios/reading_list_store_unittest.cc
@@ -11,12 +11,14 @@ #include "base/memory/ptr_util.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" -#import "base/test/ios/wait_util.h" +#include "base/test/simple_test_clock.h" #include "components/reading_list/ios/reading_list_model_impl.h" #include "components/sync/model/fake_model_type_change_processor.h" #include "components/sync/model/model_type_store_test_util.h" #include "testing/gtest/include/gtest/gtest.h" +namespace { + // Tests that the transition from |entryA| to |entryB| is possible (|possible| // is true) or not. void ExpectAB(const sync_pb::ReadingListSpecifics& entryA, @@ -24,9 +26,11 @@ bool possible) { EXPECT_EQ(ReadingListStore::CompareEntriesForSync(entryA, entryB), possible); std::unique_ptr<ReadingListEntry> a = - ReadingListEntry::FromReadingListSpecifics(entryA); + ReadingListEntry::FromReadingListSpecifics(entryA, + base::Time::FromTimeT(10)); std::unique_ptr<ReadingListEntry> b = - ReadingListEntry::FromReadingListSpecifics(entryB); + ReadingListEntry::FromReadingListSpecifics(entryB, + base::Time::FromTimeT(10)); a->MergeWithEntry(*b); std::unique_ptr<sync_pb::ReadingListSpecifics> mergedEntry = a->AsReadingListSpecifics(); @@ -41,6 +45,13 @@ } } +base::Time AdvanceAndGetTime(base::SimpleTestClock* clock) { + clock->Advance(base::TimeDelta::FromMilliseconds(10)); + return clock->Now(); +} + +} // namespace + class FakeModelTypeChangeProcessorObserver { public: virtual void Put(const std::string& client_tag, @@ -85,8 +96,11 @@ base::Passed(&store_)), base::Bind(&ReadingListStoreTest::CreateModelTypeChangeProcessor, base::Unretained(this))); - model_ = base::MakeUnique<ReadingListModelImpl>(nullptr, nullptr); - reading_list_store_->SetReadingListModel(model_.get(), this); + auto clock = base::MakeUnique<base::SimpleTestClock>(); + clock_ = clock.get(); + model_ = base::MakeUnique<ReadingListModelImpl>(nullptr, nullptr, + std::move(clock)); + reading_list_store_->SetReadingListModel(model_.get(), this, clock_); 
base::RunLoop().RunUntilIdle(); } @@ -163,6 +177,7 @@ std::unique_ptr<syncer::ModelTypeStore> store_; std::unique_ptr<ReadingListModelImpl> model_; + base::SimpleTestClock* clock_; std::unique_ptr<ReadingListStore> reading_list_store_; int put_called_; int delete_called_; @@ -181,8 +196,10 @@ } TEST_F(ReadingListStoreTest, SaveOneRead) { - ReadingListEntry entry(GURL("http://read.example.com/"), "read title"); - entry.SetRead(true); + ReadingListEntry entry(GURL("http://read.example.com/"), "read title", + AdvanceAndGetTime(clock_)); + entry.SetRead(true, AdvanceAndGetTime(clock_)); + AdvanceAndGetTime(clock_); reading_list_store_->SaveEntry(entry); AssertCounts(1, 0, 0, 0, 0); syncer::EntityData* data = put_multimap_["http://read.example.com/"].get(); @@ -194,7 +211,8 @@ } TEST_F(ReadingListStoreTest, SaveOneUnread) { - ReadingListEntry entry(GURL("http://unread.example.com/"), "unread title"); + ReadingListEntry entry(GURL("http://unread.example.com/"), "unread title", + AdvanceAndGetTime(clock_)); reading_list_store_->SaveEntry(entry); AssertCounts(1, 0, 0, 0, 0); syncer::EntityData* data = put_multimap_["http://unread.example.com/"].get(); @@ -207,8 +225,9 @@ TEST_F(ReadingListStoreTest, SyncMergeOneEntry) { syncer::EntityDataMap remote_input; - ReadingListEntry entry(GURL("http://read.example.com/"), "read title"); - entry.SetRead(true); + ReadingListEntry entry(GURL("http://read.example.com/"), "read title", + AdvanceAndGetTime(clock_)); + entry.SetRead(true, AdvanceAndGetTime(clock_)); std::unique_ptr<sync_pb::ReadingListSpecifics> specifics = entry.AsReadingListSpecifics(); @@ -230,8 +249,9 @@ TEST_F(ReadingListStoreTest, ApplySyncChangesOneAdd) { syncer::EntityDataMap remote_input; - ReadingListEntry entry(GURL("http://read.example.com/"), "read title"); - entry.SetRead(true); + ReadingListEntry entry(GURL("http://read.example.com/"), "read title", + AdvanceAndGetTime(clock_)); + entry.SetRead(true, AdvanceAndGetTime(clock_)); 
std::unique_ptr<sync_pb::ReadingListSpecifics> specifics = entry.AsReadingListSpecifics(); syncer::EntityData data; @@ -252,14 +272,13 @@ TEST_F(ReadingListStoreTest, ApplySyncChangesOneMerge) { syncer::EntityDataMap remote_input; + AdvanceAndGetTime(clock_); model_->AddEntry(GURL("http://unread.example.com/"), "unread title", reading_list::ADDED_VIA_CURRENT_APP); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(10)); - ReadingListEntry new_entry(GURL("http://unread.example.com/"), - "unread title"); - new_entry.SetRead(true); + ReadingListEntry new_entry(GURL("http://unread.example.com/"), "unread title", + AdvanceAndGetTime(clock_)); + new_entry.SetRead(true, AdvanceAndGetTime(clock_)); std::unique_ptr<sync_pb::ReadingListSpecifics> specifics = new_entry.AsReadingListSpecifics(); syncer::EntityData data; @@ -280,12 +299,11 @@ TEST_F(ReadingListStoreTest, ApplySyncChangesOneIgnored) { // Read entry but with unread URL as it must update the other one. ReadingListEntry old_entry(GURL("http://unread.example.com/"), - "old unread title"); - old_entry.SetRead(true); + "old unread title", AdvanceAndGetTime(clock_)); + old_entry.SetRead(true, AdvanceAndGetTime(clock_)); - base::test::ios::SpinRunLoopWithMinDelay( - base::TimeDelta::FromMilliseconds(10)); syncer::EntityDataMap remote_input; + AdvanceAndGetTime(clock_); model_->AddEntry(GURL("http://unread.example.com/"), "new unread title", reading_list::ADDED_VIA_CURRENT_APP); AssertCounts(0, 0, 0, 0, 0);
diff --git a/components/search_engines/template_url_service.cc b/components/search_engines/template_url_service.cc index b85e4f0..3bb83591 100644 --- a/components/search_engines/template_url_service.cc +++ b/components/search_engines/template_url_service.cc
@@ -1441,28 +1441,14 @@ DCHECK_NE(engine1, engine2); DCHECK_EQ(engine1->keyword(), engine2->keyword()); - std::string engine1_params = - base::StringPrintf("%s, %i, %" PRId64 ", %i, %s", - base::UTF16ToUTF8(engine1->keyword()).c_str(), - static_cast<int>(engine1->type()), engine1->id(), - engine1->prepopulate_id(), engine1->url().c_str()); - - std::string engine2_params = - base::StringPrintf("%s, %i, %" PRId64 ", %i, %s", - base::UTF16ToUTF8(engine2->keyword()).c_str(), - static_cast<int>(engine2->type()), engine2->id(), - engine2->prepopulate_id(), engine2->url().c_str()); - - base::debug::ScopedCrashKey scoped_crash_key1("engine1_params", - engine1_params); - base::debug::ScopedCrashKey scoped_crash_key2("engine2_params", - engine2_params); - // We should only have overlapping keywords when at least one comes from // an extension. - // TODO(a-v-y) Replace CHECK with DCHECK when reasons for crash - // https://bugs.chromium.org/p/chromium/issues/detail?id=697745 become clear. - CHECK(IsCreatedByExtension(engine1) || IsCreatedByExtension(engine2)); + DCHECK(IsCreatedByExtension(engine1) || IsCreatedByExtension(engine2)); + // TODO(a-v-y) Remove following code for non extension engines when reasons + // for crash https://bugs.chromium.org/p/chromium/issues/detail?id=697745 + // become clear. + if (!IsCreatedByExtension(engine1) && !IsCreatedByExtension(engine2)) + return CanReplace(engine1) ? engine2 : engine1; if (engine2->type() == engine1->type()) { return engine1->extension_info_->install_time > @@ -1591,23 +1577,6 @@ return turl->id() != kInvalidTemplateURLID; }); - // Check that no extension engines are coming from DB. - // TODO(a-v-y) Replace CHECK with DCHECK when reasons for crash - // https://bugs.chromium.org/p/chromium/issues/detail?id=697745 become clear. 
- for_each( - urls->begin(), urls->end(), [](const std::unique_ptr<TemplateURL>& turl) { - if (IsCreatedByExtension(turl.get())) { - std::string engine_params = - base::StringPrintf("%s, %i, %" PRId64 ", %i, %s", - base::UTF16ToUTF8(turl->keyword()).c_str(), - static_cast<int>(turl->type()), turl->id(), - turl->prepopulate_id(), turl->url().c_str()); - base::debug::ScopedCrashKey scoped_crash_key1("engine_params", - engine_params); - CHECK(false) << "Unexpected search engine type"; - } - }); - // First, add the items that already have id's, so that the next_id_ gets // properly set. for (auto i = urls->begin(); i != first_invalid; ++i) {
diff --git a/components/search_provider_logos/logo_tracker.cc b/components/search_provider_logos/logo_tracker.cc index 7bd11b5f..a2fc7f9 100644 --- a/components/search_provider_logos/logo_tracker.cc +++ b/components/search_provider_logos/logo_tracker.cc
@@ -261,6 +261,7 @@ } void LogoTracker::OnFreshLogoParsed(bool* parsing_failed, + bool from_http_cache, std::unique_ptr<EncodedLogo> logo) { DCHECK(!is_idle_); @@ -268,7 +269,8 @@ logo->metadata.source_url = logo_url_.spec(); if (!logo || !logo->encoded_image.get()) { - OnFreshLogoAvailable(std::move(logo), *parsing_failed, SkBitmap()); + OnFreshLogoAvailable(std::move(logo), *parsing_failed, from_http_cache, + SkBitmap()); } else { // Store the value of logo->encoded_image for use below. This ensures that // logo->encoded_image is evaulated before base::Passed(&logo), which sets @@ -277,15 +279,15 @@ logo_delegate_->DecodeUntrustedImage( encoded_image, base::Bind(&LogoTracker::OnFreshLogoAvailable, - weak_ptr_factory_.GetWeakPtr(), - base::Passed(&logo), - *parsing_failed)); + weak_ptr_factory_.GetWeakPtr(), base::Passed(&logo), + *parsing_failed, from_http_cache)); } } void LogoTracker::OnFreshLogoAvailable( std::unique_ptr<EncodedLogo> encoded_logo, bool parsing_failed, + bool from_http_cache, const SkBitmap& image) { DCHECK(!is_idle_); @@ -307,6 +309,8 @@ std::unique_ptr<Logo> logo; // Check if the server returned a valid, non-empty response. if (encoded_logo) { + UMA_HISTOGRAM_BOOLEAN("NewTabPage.LogoImageDownloaded", from_http_cache); + DCHECK(!image.isNull()); logo.reset(new Logo()); logo->metadata = encoded_logo->metadata; @@ -360,13 +364,16 @@ source->GetResponseAsString(response.get()); base::Time response_time = clock_->Now(); + bool from_http_cache = source->WasCached(); + bool* parsing_failed = new bool(false); base::PostTaskAndReplyWithResult( background_task_runner_.get(), FROM_HERE, base::Bind(parse_logo_response_func_, base::Passed(&response), response_time, parsing_failed), base::Bind(&LogoTracker::OnFreshLogoParsed, - weak_ptr_factory_.GetWeakPtr(), base::Owned(parsing_failed))); + weak_ptr_factory_.GetWeakPtr(), base::Owned(parsing_failed), + from_http_cache)); } void LogoTracker::OnURLFetchDownloadProgress(const net::URLFetcher* source,
diff --git a/components/search_provider_logos/logo_tracker.h b/components/search_provider_logos/logo_tracker.h index 10e033c0..402b640 100644 --- a/components/search_provider_logos/logo_tracker.h +++ b/components/search_provider_logos/logo_tracker.h
@@ -194,12 +194,14 @@ // Called when the logo has been downloaded and parsed. |logo| will be NULL // if the server's response was invalid. void OnFreshLogoParsed(bool* parsing_failed, + bool from_http_cache, std::unique_ptr<EncodedLogo> logo); // Called when the fresh logo has been decoded into an SkBitmap. |image| will // be NULL if decoding failed. void OnFreshLogoAvailable(std::unique_ptr<EncodedLogo> logo, bool parsing_failed, + bool from_http_cache, const SkBitmap& image); // net::URLFetcherDelegate:
diff --git a/components/signin/core/browser/signin_header_helper_unittest.cc b/components/signin/core/browser/signin_header_helper_unittest.cc index 239b0a57..09d66c78 100644 --- a/components/signin/core/browser/signin_header_helper_unittest.cc +++ b/components/signin/core/browser/signin_header_helper_unittest.cc
@@ -117,11 +117,13 @@ // Tests that the Mirror request is returned with the GAIA Id on Drive origin, // even if account consistency is disabled. +// +// Account consistency if always enabled on Android and iOS, so this test is +// only relevant on Desktop. +#if !defined(OS_ANDROID) && !defined(OS_IOS) TEST_F(SigninHeaderHelperTest, TestMirrorRequestDrive) { DCHECK(!base::CommandLine::ForCurrentProcess()->HasSwitch( switches::kEnableAccountConsistency)); - base::CommandLine::ForCurrentProcess()->AppendSwitch( - switches::kDisableAccountConsistency); CheckMirrorHeaderRequest( GURL("https://docs.google.com/document"), "0123456789", "id=0123456789,mode=0,enable_account_consistency=false"); @@ -139,6 +141,7 @@ GURL("https://drive.google.com/drive"), "0123456789", "id=0123456789:mode=0:enable_account_consistency=true"); } +#endif // Tests that the Mirror header request is returned normally when the redirect // URL is eligible.
diff --git a/components/signin/core/common/profile_management_switches.cc b/components/signin/core/common/profile_management_switches.cc index 3ab4f8f..e9f883cf 100644 --- a/components/signin/core/common/profile_management_switches.cc +++ b/components/signin/core/common/profile_management_switches.cc
@@ -12,114 +12,32 @@ #include "build/build_config.h" #include "components/signin/core/common/signin_switches.h" -namespace { - -const char kNewProfileManagementFieldTrialName[] = "NewProfileManagement"; - -// Different state of new profile management/identity consistency. The code -// below assumes the order of the values in this enum. That is, new profile -// management is included in consistent identity. -enum State { - STATE_NEW_AVATAR_MENU, - STATE_NEW_PROFILE_MANAGEMENT, - STATE_ACCOUNT_CONSISTENCY, -}; - -State GetProcessState() { - // Find the state of both command line args. - bool is_new_profile_management = - base::CommandLine::ForCurrentProcess()->HasSwitch( - switches::kEnableNewProfileManagement); - bool is_consistent_identity = - base::CommandLine::ForCurrentProcess()->HasSwitch( - switches::kEnableAccountConsistency); - bool not_new_profile_management = - base::CommandLine::ForCurrentProcess()->HasSwitch( - switches::kDisableNewProfileManagement); - bool not_consistent_identity = - base::CommandLine::ForCurrentProcess()->HasSwitch( - switches::kDisableAccountConsistency); - int count_args = (is_new_profile_management ? 1 : 0) + - (is_consistent_identity ? 1 : 0) + - (not_new_profile_management ? 1 : 0) + - (not_consistent_identity ? 1 : 0); - bool invalid_commandline = count_args > 1; - - // At most only one of the command line args should be specified, otherwise - // the finch group assignment is undefined. If this is the case, disable - // the field trial so that data is not collected in the wrong group. - std::string trial_type; - if (invalid_commandline) { - base::FieldTrial* field_trial = - base::FieldTrialList::Find(kNewProfileManagementFieldTrialName); - if (field_trial) - field_trial->Disable(); - - trial_type.clear(); - } else { - // Since the experiment is not being disabled, get the full name of the - // field trial which will initialize the underlying mechanism. 
- trial_type = - base::FieldTrialList::FindFullName(kNewProfileManagementFieldTrialName); - } - - // Enable command line args take precedent over disable command line args. - // Consistent identity args take precedent over new profile management args. - if (is_consistent_identity) { - return STATE_ACCOUNT_CONSISTENCY; - } else if (is_new_profile_management) { - return STATE_NEW_PROFILE_MANAGEMENT; - } else if (not_new_profile_management) { - return STATE_NEW_AVATAR_MENU; - } else if (not_consistent_identity) { - return STATE_NEW_PROFILE_MANAGEMENT; - } - - // Set the default state -#if defined(OS_ANDROID) || defined(OS_IOS) - State state = STATE_ACCOUNT_CONSISTENCY; -#else - State state = STATE_NEW_PROFILE_MANAGEMENT; -#endif - - if (!trial_type.empty()) { - if (trial_type == "Enabled") { - state = STATE_NEW_PROFILE_MANAGEMENT; - } else if (trial_type == "AccountConsistency") { - state = STATE_ACCOUNT_CONSISTENCY; - } else if (trial_type == "NewAvatarMenu") { - state = STATE_NEW_AVATAR_MENU; - } else { - state = STATE_NEW_PROFILE_MANAGEMENT; - } - } - - return state; -} - -bool CheckFlag(const std::string& command_switch, State min_state) { - // Individiual flag settings take precedence. - if (base::CommandLine::ForCurrentProcess()->HasSwitch(command_switch)) - return true; - - return GetProcessState() >= min_state; -} - -} // namespace - namespace switches { bool IsEnableAccountConsistency() { - return GetProcessState() >= STATE_ACCOUNT_CONSISTENCY; +#if defined(OS_ANDROID) || defined(OS_IOS) + // Account consistency is enabled on Android and iOS. + return true; +#endif + + return base::CommandLine::ForCurrentProcess()->HasSwitch( + switches::kEnableAccountConsistency); } bool IsExtensionsMultiAccount() { - return CheckFlag(switches::kExtensionsMultiAccount, - STATE_ACCOUNT_CONSISTENCY); +#if defined(OS_ANDROID) || defined(OS_IOS) + NOTREACHED() << "Extensions are not enabled on Android or iOS"; + // Account consistency is enabled on Android and iOS. 
+ return false; +#endif + + return base::CommandLine::ForCurrentProcess()->HasSwitch( + switches::kExtensionsMultiAccount) || + IsEnableAccountConsistency(); } bool IsNewProfileManagement() { - return GetProcessState() >= STATE_NEW_PROFILE_MANAGEMENT; + return true; } bool UsePasswordSeparatedSigninFlow() { @@ -128,13 +46,11 @@ } void EnableNewProfileManagementForTesting(base::CommandLine* command_line) { - command_line->AppendSwitch(switches::kEnableNewProfileManagement); - DCHECK(!command_line->HasSwitch(switches::kDisableNewProfileManagement)); + // No-op as new profile management is always enabled. } void EnableAccountConsistencyForTesting(base::CommandLine* command_line) { command_line->AppendSwitch(switches::kEnableAccountConsistency); - DCHECK(!command_line->HasSwitch(switches::kDisableAccountConsistency)); } } // namespace switches
diff --git a/components/signin/core/common/signin_switches.cc b/components/signin/core/common/signin_switches.cc index 5657dea..0473a9a 100644 --- a/components/signin/core/common/signin_switches.cc +++ b/components/signin/core/common/signin_switches.cc
@@ -10,21 +10,12 @@ // expiration of credentials during testing. const char kClearTokenService[] = "clear-token-service"; -// Disables consistent identity features. -const char kDisableAccountConsistency[] = "disable-account-consistency"; - -// Disables new profile management system, including new profile chooser UI. -const char kDisableNewProfileManagement[] = "disable-new-profile-management"; - // Disables sending signin scoped device id to LSO with refresh token request. const char kDisableSigninScopedDeviceId[] = "disable-signin-scoped-device-id"; // Enables consistent identity features. const char kEnableAccountConsistency[] = "enable-account-consistency"; -// Enables new profile management system, including lock mode. -const char kEnableNewProfileManagement[] = "new-profile-management"; - // Enables sending EnableRefreshTokenAnnotationRequest. extern const char kEnableRefreshTokenAnnotationRequest[] = "enable-refresh-token-annotation-request";
diff --git a/components/signin/core/common/signin_switches.h b/components/signin/core/common/signin_switches.h index 96afbaf..1c691257 100644 --- a/components/signin/core/common/signin_switches.h +++ b/components/signin/core/common/signin_switches.h
@@ -16,11 +16,8 @@ // All switches in alphabetical order. The switches should be documented // alongside the definition of their values in the .cc file. extern const char kClearTokenService[]; -extern const char kDisableAccountConsistency[]; -extern const char kDisableNewProfileManagement[]; extern const char kDisableSigninScopedDeviceId[]; extern const char kEnableAccountConsistency[]; -extern const char kEnableNewProfileManagement[]; extern const char kEnableRefreshTokenAnnotationRequest[]; extern const char kExtensionsMultiAccount[];
diff --git a/content/browser/webrtc/webrtc_audio_debug_recordings_browsertest.cc b/content/browser/webrtc/webrtc_audio_debug_recordings_browsertest.cc index 1b7f947..4129ac8e7 100644 --- a/content/browser/webrtc/webrtc_audio_debug_recordings_browsertest.cc +++ b/content/browser/webrtc/webrtc_audio_debug_recordings_browsertest.cc
@@ -167,13 +167,10 @@ for (int i = 0; i < 2; ++i) { file_path = GetExpectedOutputAudioFileName( base_file_path, kExpectedFirstOutputStreamId + i); -// Flaky on Mac. http://crbug.com/700859. -#if !defined(OS_MACOSX) EXPECT_TRUE(base::PathExists(file_path)); file_size = 0; EXPECT_TRUE(base::GetFileSize(file_path, &file_size)); EXPECT_GT(file_size, kWaveHeaderSizeBytes); -#endif EXPECT_TRUE(base::DeleteFile(file_path, false)); }
diff --git a/content/renderer/mus/renderer_window_tree_client.cc b/content/renderer/mus/renderer_window_tree_client.cc index 44474e0..6940056 100644 --- a/content/renderer/mus/renderer_window_tree_client.cc +++ b/content/renderer/mus/renderer_window_tree_client.cc
@@ -41,6 +41,27 @@ client->DestroySelf(); } +void RendererWindowTreeClient::Bind( + ui::mojom::WindowTreeClientRequest request) { + binding_.Bind(std::move(request)); +} + +void RendererWindowTreeClient::RequestCompositorFrameSink( + scoped_refptr<cc::ContextProvider> context_provider, + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager, + const CompositorFrameSinkCallback& callback) { + DCHECK(pending_compositor_frame_sink_callback_.is_null()); + if (frame_sink_id_.is_valid()) { + RequestCompositorFrameSinkInternal(std::move(context_provider), + gpu_memory_buffer_manager, callback); + return; + } + + pending_context_provider_ = std::move(context_provider); + pending_gpu_memory_buffer_manager_ = gpu_memory_buffer_manager; + pending_compositor_frame_sink_callback_ = callback; +} + RendererWindowTreeClient::RendererWindowTreeClient(int routing_id) : routing_id_(routing_id), binding_(this) {} @@ -48,28 +69,18 @@ g_connections.Get().erase(routing_id_); } -void RendererWindowTreeClient::Bind( - ui::mojom::WindowTreeClientRequest request) { - binding_.Bind(std::move(request)); -} - -std::unique_ptr<cc::CompositorFrameSink> -RendererWindowTreeClient::CreateCompositorFrameSink( - const cc::FrameSinkId& frame_sink_id, +void RendererWindowTreeClient::RequestCompositorFrameSinkInternal( scoped_refptr<cc::ContextProvider> context_provider, - gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) { + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager, + const CompositorFrameSinkCallback& callback) { std::unique_ptr<ui::ClientCompositorFrameSinkBinding> frame_sink_binding; auto frame_sink = ui::ClientCompositorFrameSink::Create( - frame_sink_id, std::move(context_provider), gpu_memory_buffer_manager, + frame_sink_id_, std::move(context_provider), gpu_memory_buffer_manager, &frame_sink_binding); - if (tree_) { - tree_->AttachCompositorFrameSink( - root_window_id_, frame_sink_binding->TakeFrameSinkRequest(), - mojo::MakeProxy(frame_sink_binding->TakeFrameSinkClient())); 
- } else { - pending_frame_sink_ = std::move(frame_sink_binding); - } - return std::move(frame_sink); + tree_->AttachCompositorFrameSink( + root_window_id_, frame_sink_binding->TakeFrameSinkRequest(), + mojo::MakeProxy(frame_sink_binding->TakeFrameSinkClient())); + callback.Run(std::move(frame_sink)); } void RendererWindowTreeClient::DestroySelf() { @@ -81,14 +92,18 @@ ui::mojom::WindowTreePtr tree, int64_t display_id, ui::Id focused_window_id, - bool drawn) { + bool drawn, + const cc::FrameSinkId& frame_sink_id) { + frame_sink_id_ = frame_sink_id; root_window_id_ = root->window_id; tree_ = std::move(tree); - if (pending_frame_sink_) { - tree_->AttachCompositorFrameSink( - root_window_id_, pending_frame_sink_->TakeFrameSinkRequest(), - mojo::MakeProxy(pending_frame_sink_->TakeFrameSinkClient())); - pending_frame_sink_ = nullptr; + if (!pending_compositor_frame_sink_callback_.is_null()) { + RequestCompositorFrameSinkInternal(std::move(pending_context_provider_), + pending_gpu_memory_buffer_manager_, + pending_compositor_frame_sink_callback_); + pending_context_provider_ = nullptr; + pending_gpu_memory_buffer_manager_ = nullptr; + pending_compositor_frame_sink_callback_.Reset(); } } @@ -104,10 +119,12 @@ void RendererWindowTreeClient::OnCaptureChanged(ui::Id new_capture_window_id, ui::Id old_capture_window_id) {} -void RendererWindowTreeClient::OnTopLevelCreated(uint32_t change_id, - ui::mojom::WindowDataPtr data, - int64_t display_id, - bool drawn) { +void RendererWindowTreeClient::OnTopLevelCreated( + uint32_t change_id, + ui::mojom::WindowDataPtr data, + int64_t display_id, + bool drawn, + const cc::FrameSinkId& frame_sink_id) { NOTREACHED(); }
diff --git a/content/renderer/mus/renderer_window_tree_client.h b/content/renderer/mus/renderer_window_tree_client.h index d94094d0..3241ca48 100644 --- a/content/renderer/mus/renderer_window_tree_client.h +++ b/content/renderer/mus/renderer_window_tree_client.h
@@ -19,10 +19,6 @@ class GpuMemoryBufferManager; } -namespace ui { -class ClientCompositorFrameSinkBinding; -} - namespace content { // ui.mojom.WindowTreeClient implementation for RenderWidget. This lives and @@ -44,15 +40,22 @@ void Bind(ui::mojom::WindowTreeClientRequest request); - std::unique_ptr<cc::CompositorFrameSink> CreateCompositorFrameSink( - const cc::FrameSinkId& frame_sink_id, + using CompositorFrameSinkCallback = + base::Callback<void(std::unique_ptr<cc::CompositorFrameSink>)>; + void RequestCompositorFrameSink( scoped_refptr<cc::ContextProvider> context_provider, - gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager); + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager, + const CompositorFrameSinkCallback& callback); private: explicit RendererWindowTreeClient(int routing_id); ~RendererWindowTreeClient() override; + void RequestCompositorFrameSinkInternal( + scoped_refptr<cc::ContextProvider> context_provider, + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager, + const CompositorFrameSinkCallback& callback); + void DestroySelf(); // ui::mojom::WindowTreeClient: @@ -65,7 +68,8 @@ ui::mojom::WindowTreePtr tree, int64_t display_id, ui::Id focused_window_id, - bool drawn) override; + bool drawn, + const cc::FrameSinkId& frame_sink_id) override; void OnEmbeddedAppDisconnected(ui::Id window_id) override; void OnUnembed(ui::Id window_id) override; void OnCaptureChanged(ui::Id new_capture_window_id, @@ -73,7 +77,8 @@ void OnTopLevelCreated(uint32_t change_id, ui::mojom::WindowDataPtr data, int64_t display_id, - bool drawn) override; + bool drawn, + const cc::FrameSinkId& frame_sink_id) override; void OnWindowBoundsChanged( ui::Id window_id, const gfx::Rect& old_bounds, @@ -149,8 +154,11 @@ const int routing_id_; ui::Id root_window_id_; + cc::FrameSinkId frame_sink_id_; + scoped_refptr<cc::ContextProvider> pending_context_provider_; + gpu::GpuMemoryBufferManager* pending_gpu_memory_buffer_manager_ = nullptr; + CompositorFrameSinkCallback 
pending_compositor_frame_sink_callback_; ui::mojom::WindowTreePtr tree_; - std::unique_ptr<ui::ClientCompositorFrameSinkBinding> pending_frame_sink_; mojo::Binding<ui::mojom::WindowTreeClient> binding_; DISALLOW_COPY_AND_ASSIGN(RendererWindowTreeClient);
diff --git a/content/renderer/render_thread_impl.cc b/content/renderer/render_thread_impl.cc index 951c1af..408fe12 100644 --- a/content/renderer/render_thread_impl.cc +++ b/content/renderer/render_thread_impl.cc
@@ -1889,11 +1889,10 @@ callback.Run(nullptr); return; } - callback.Run(RendererWindowTreeClient::Get(routing_id) - ->CreateCompositorFrameSink( - cc::FrameSinkId(client_id_, routing_id), - gpu_->CreateContextProvider(std::move(channel)), - GetGpuMemoryBufferManager())); + RendererWindowTreeClient::Get(routing_id) + ->RequestCompositorFrameSink( + gpu_->CreateContextProvider(std::move(channel)), + GetGpuMemoryBufferManager(), callback); return; } #endif
diff --git a/device/generic_sensor/android/java/src/org/chromium/device/sensors/PlatformSensor.java b/device/generic_sensor/android/java/src/org/chromium/device/sensors/PlatformSensor.java index 1d825fe3..ea60b3d7f 100644 --- a/device/generic_sensor/android/java/src/org/chromium/device/sensors/PlatformSensor.java +++ b/device/generic_sensor/android/java/src/org/chromium/device/sensors/PlatformSensor.java
@@ -205,9 +205,9 @@ * Updates reading at native device::PlatformSensorAndroid. */ protected void updateSensorReading( - double timestamp, double value1, double value2, double value3) { + double timestamp, double value1, double value2, double value3, double value4) { nativeUpdatePlatformSensorReading( - mNativePlatformSensorAndroid, timestamp, value1, value2, value3); + mNativePlatformSensorAndroid, timestamp, value1, value2, value3, value4); } @Override @@ -224,17 +224,22 @@ double timestamp = event.timestamp * SECONDS_IN_NANOSECOND; switch (event.values.length) { case 1: - updateSensorReading(timestamp, event.values[0], 0.0, 0.0); + updateSensorReading(timestamp, event.values[0], 0.0, 0.0, 0.0); break; case 2: - updateSensorReading(timestamp, event.values[0], event.values[1], 0.0); + updateSensorReading(timestamp, event.values[0], event.values[1], 0.0, 0.0); + break; + case 3: + updateSensorReading( + timestamp, event.values[0], event.values[1], event.values[2], 0.0); break; default: - updateSensorReading(timestamp, event.values[0], event.values[1], event.values[2]); + updateSensorReading(timestamp, event.values[0], event.values[1], event.values[2], + event.values[3]); } } private native void nativeNotifyPlatformSensorError(long nativePlatformSensorAndroid); private native void nativeUpdatePlatformSensorReading(long nativePlatformSensorAndroid, - double timestamp, double value1, double value2, double value3); + double timestamp, double value1, double value2, double value3, double value4); }
diff --git a/device/generic_sensor/android/java/src/org/chromium/device/sensors/PlatformSensorProvider.java b/device/generic_sensor/android/java/src/org/chromium/device/sensors/PlatformSensorProvider.java index f5bfa11..5b963af 100644 --- a/device/generic_sensor/android/java/src/org/chromium/device/sensors/PlatformSensorProvider.java +++ b/device/generic_sensor/android/java/src/org/chromium/device/sensors/PlatformSensorProvider.java
@@ -150,6 +150,8 @@ return PlatformSensor.create(Sensor.TYPE_GYROSCOPE, 3, this); case SensorType.MAGNETOMETER: return PlatformSensor.create(Sensor.TYPE_MAGNETIC_FIELD, 3, this); + case SensorType.ABSOLUTE_ORIENTATION: + return PlatformSensor.create(Sensor.TYPE_ROTATION_VECTOR, 4, this); default: return null; }
diff --git a/device/generic_sensor/android/junit/src/org/chromium/device/sensors/PlatformSensorAndProviderTest.java b/device/generic_sensor/android/junit/src/org/chromium/device/sensors/PlatformSensorAndProviderTest.java index f8271fb1..0c747f639 100644 --- a/device/generic_sensor/android/junit/src/org/chromium/device/sensors/PlatformSensorAndProviderTest.java +++ b/device/generic_sensor/android/junit/src/org/chromium/device/sensors/PlatformSensorAndProviderTest.java
@@ -91,7 +91,7 @@ @Override protected void updateSensorReading( - double timestamp, double value1, double value2, double value3) {} + double timestamp, double value1, double value2, double value3, double value4) {} @Override protected void sensorError() {} } @@ -159,6 +159,8 @@ verify(mSensorManager).getSensorList(Sensor.TYPE_GYROSCOPE); provider.createSensor(SensorType.MAGNETOMETER); verify(mSensorManager).getSensorList(Sensor.TYPE_MAGNETIC_FIELD); + provider.createSensor(SensorType.ABSOLUTE_ORIENTATION); + verify(mSensorManager).getSensorList(Sensor.TYPE_ROTATION_VECTOR); } /** @@ -289,7 +291,7 @@ double timestamp = PLATFORM_SENSOR_TIMESTAMP * SECONDS_IN_NANOSECOND; verify(spySensor, times(1)) - .updateSensorReading(timestamp, getFakeReadingValue(1), 0.0, 0.0); + .updateSensorReading(timestamp, getFakeReadingValue(1), 0.0, 0.0, 0.0); } /**
diff --git a/device/generic_sensor/platform_sensor_android.cc b/device/generic_sensor/platform_sensor_android.cc index 13b633e0..c2e0c87f 100644 --- a/device/generic_sensor/platform_sensor_android.cc +++ b/device/generic_sensor/platform_sensor_android.cc
@@ -85,12 +85,14 @@ jdouble timestamp, jdouble value1, jdouble value2, - jdouble value3) { + jdouble value3, + jdouble value4) { SensorReading reading; reading.timestamp = timestamp; reading.values[0] = value1; reading.values[1] = value2; reading.values[2] = value3; + reading.values[3] = value4; bool needNotify = (GetReportingMode() == mojom::ReportingMode::ON_CHANGE); UpdateSensorReading(reading, needNotify);
diff --git a/device/generic_sensor/platform_sensor_android.h b/device/generic_sensor/platform_sensor_android.h index 82575e0..48937e0b 100644 --- a/device/generic_sensor/platform_sensor_android.h +++ b/device/generic_sensor/platform_sensor_android.h
@@ -34,7 +34,8 @@ jdouble timestamp, jdouble value1, jdouble value2, - jdouble value3); + jdouble value3, + jdouble value4); protected: ~PlatformSensorAndroid() override;
diff --git a/device/generic_sensor/public/cpp/sensor_reading.h b/device/generic_sensor/public/cpp/sensor_reading.h index 90848d7..81e2f768 100644 --- a/device/generic_sensor/public/cpp/sensor_reading.h +++ b/device/generic_sensor/public/cpp/sensor_reading.h
@@ -39,13 +39,13 @@ Storage storage_; }; -// This structure represents sensor reading data: timestamp and 3 values. +// This structure represents sensor reading data: timestamp and 4 values. struct DEVICE_GENERIC_SENSOR_PUBLIC_EXPORT SensorReading { SensorReading(); ~SensorReading(); SensorReading(const SensorReading& other); SensorReadingField<double> timestamp; - constexpr static int kValuesCount = 3; + constexpr static int kValuesCount = 4; SensorReadingField<double> values[kValuesCount]; };
diff --git a/device/generic_sensor/public/interfaces/sensor.mojom b/device/generic_sensor/public/interfaces/sensor.mojom index be71501..3a6bfb53 100644 --- a/device/generic_sensor/public/interfaces/sensor.mojom +++ b/device/generic_sensor/public/interfaces/sensor.mojom
@@ -14,7 +14,8 @@ GYROSCOPE, MAGNETOMETER, PRESSURE, - LAST = PRESSURE // Note: LAST is also equal to the types count. + ABSOLUTE_ORIENTATION, + LAST = ABSOLUTE_ORIENTATION // Note: LAST is also equal to the types count. }; // Reporting mode supported by the Sensor.
diff --git a/device/generic_sensor/public/interfaces/sensor_provider.mojom b/device/generic_sensor/public/interfaces/sensor_provider.mojom index 8e9a3c2..d9fa895 100644 --- a/device/generic_sensor/public/interfaces/sensor_provider.mojom +++ b/device/generic_sensor/public/interfaces/sensor_provider.mojom
@@ -27,10 +27,10 @@ // Minimum sampling frequency for the sensor. double minimum_frequency; - // Each sensor's read buffer contains 4 tightly packed 64-bit floating + // Each sensor's read buffer contains 5 tightly packed 64-bit floating // point fields (please see sensor_reading.h) and a seqlock, so its size is - // 5 * 8 = 40 bytes. - const uint64 kReadBufferSizeForTests = 40; + // 6 * 8 = 48 bytes. + const uint64 kReadBufferSizeForTests = 48; }; interface SensorProvider {
diff --git a/extensions/browser/api/declarative/rules_cache_delegate.cc b/extensions/browser/api/declarative/rules_cache_delegate.cc index b2976d9..9601c05 100644 --- a/extensions/browser/api/declarative/rules_cache_delegate.cc +++ b/extensions/browser/api/declarative/rules_cache_delegate.cc
@@ -6,6 +6,7 @@ #include <utility> +#include "base/memory/ptr_util.h" #include "content/public/browser/browser_context.h" #include "content/public/browser/notification_details.h" #include "content/public/browser/notification_source.h" @@ -223,8 +224,9 @@ ->GetExtensionById(extension_id, ExtensionRegistry::EVERYTHING)); ExtensionScopedPrefs* extension_prefs = ExtensionPrefs::Get(browser_context_); - extension_prefs->UpdateExtensionPref(extension_id, rules_stored_key_, - new base::Value(rules_stored)); + extension_prefs->UpdateExtensionPref( + extension_id, rules_stored_key_, + base::MakeUnique<base::Value>(rules_stored)); } } // namespace extensions
diff --git a/extensions/browser/api/device_permissions_manager.cc b/extensions/browser/api/device_permissions_manager.cc index f8eebb1..427b56a 100644 --- a/extensions/browser/api/device_permissions_manager.cc +++ b/extensions/browser/api/device_permissions_manager.cc
@@ -183,7 +183,7 @@ // Clears all DevicePermissionEntries for the app from ExtensionPrefs. void ClearDevicePermissionEntries(ExtensionPrefs* prefs, const std::string& extension_id) { - prefs->UpdateExtensionPref(extension_id, kDevices, NULL); + prefs->UpdateExtensionPref(extension_id, kDevices, nullptr); } scoped_refptr<DevicePermissionEntry> ReadDevicePermissionEntry(
diff --git a/extensions/browser/api/runtime/runtime_api.cc b/extensions/browser/api/runtime/runtime_api.cc index 6fa8494f..dbd9e915 100644 --- a/extensions/browser/api/runtime/runtime_api.cc +++ b/extensions/browser/api/runtime/runtime_api.cc
@@ -156,7 +156,7 @@ const std::string& extension_id, const std::string& url_string) { prefs->UpdateExtensionPref(extension_id, kUninstallUrl, - new base::Value(url_string)); + base::MakeUnique<base::Value>(url_string)); } std::string GetUninstallURL(ExtensionPrefs* prefs, @@ -327,7 +327,7 @@ previous_version.IsValid() ? previous_version.GetString() : ""); prefs->UpdateExtensionPref(extension->id(), kPrefPendingOnInstalledEventDispatchInfo, - pending_on_install_info.release()); + std::move(pending_on_install_info)); } void RuntimeAPI::ReloadExtension(const std::string& extension_id) {
diff --git a/extensions/browser/event_router.cc b/extensions/browser/event_router.cc index 0dc125a..67eb463 100644 --- a/extensions/browser/event_router.cc +++ b/extensions/browser/event_router.cc
@@ -378,13 +378,13 @@ void EventRouter::SetRegisteredEvents(const std::string& extension_id, const std::set<std::string>& events) { - ListValue* events_value = new ListValue; + auto events_value = base::MakeUnique<base::ListValue>(); for (std::set<std::string>::const_iterator iter = events.begin(); iter != events.end(); ++iter) { events_value->AppendString(*iter); } - extension_prefs_->UpdateExtensionPref( - extension_id, kRegisteredEvents, events_value); + extension_prefs_->UpdateExtensionPref(extension_id, kRegisteredEvents, + std::move(events_value)); } void EventRouter::AddFilterToEvent(const std::string& event_name,
diff --git a/extensions/browser/extension_prefs.cc b/extensions/browser/extension_prefs.cc index 8d08043..01ca8d3c 100644 --- a/extensions/browser/extension_prefs.cc +++ b/extensions/browser/extension_prefs.cc
@@ -441,16 +441,17 @@ return extension_dict; } -void ExtensionPrefs::UpdateExtensionPref(const std::string& extension_id, - const std::string& key, - base::Value* data_value) { +void ExtensionPrefs::UpdateExtensionPref( + const std::string& extension_id, + const std::string& key, + std::unique_ptr<base::Value> data_value) { if (!crx_file::id_util::IdIsValid(extension_id)) { NOTREACHED() << "Invalid extension_id " << extension_id; return; } ScopedExtensionPrefUpdate update(prefs_, extension_id); if (data_value) - update->Set(key, data_value); + update->Set(key, std::move(data_value)); else update->Remove(key, NULL); } @@ -550,7 +551,7 @@ const std::string& extension_id, const std::string& pref_key, const URLPatternSet& new_value) { - UpdateExtensionPref(extension_id, pref_key, new_value.ToValue().release()); + UpdateExtensionPref(extension_id, pref_key, new_value.ToValue()); } bool ExtensionPrefs::ReadPrefAsBooleanAndReturn( @@ -613,9 +614,10 @@ // // permission supports detail, permission detail will be stored in value. // ... 
// ] -template<typename T> -static base::ListValue* CreatePermissionList(const T& permissions) { - base::ListValue* values = new base::ListValue(); +template <typename T> +static std::unique_ptr<base::ListValue> CreatePermissionList( + const T& permissions) { + auto values = base::MakeUnique<base::ListValue>(); for (typename T::const_iterator i = permissions.begin(); i != permissions.end(); ++i) { std::unique_ptr<base::Value> detail(i->ToValue()); @@ -635,16 +637,13 @@ const std::string& pref_key, const PermissionSet& new_value) { std::string api_pref = JoinPrefs(pref_key, kPrefAPIs); - base::ListValue* api_values = CreatePermissionList(new_value.apis()); - UpdateExtensionPref(extension_id, api_pref, api_values); + UpdateExtensionPref(extension_id, api_pref, + CreatePermissionList(new_value.apis())); std::string manifest_permissions_pref = JoinPrefs(pref_key, kPrefManifestPermissions); - base::ListValue* manifest_permissions_values = - CreatePermissionList(new_value.manifest_permissions()); - UpdateExtensionPref(extension_id, - manifest_permissions_pref, - manifest_permissions_values); + UpdateExtensionPref(extension_id, manifest_permissions_pref, + CreatePermissionList(new_value.manifest_permissions())); // Set the explicit host permissions. 
if (!new_value.explicit_hosts().is_empty()) { @@ -667,7 +666,7 @@ ReadPrefAsInteger(extension_id, kPrefAcknowledgePromptCount, &count); ++count; UpdateExtensionPref(extension_id, kPrefAcknowledgePromptCount, - new base::Value(count)); + base::MakeUnique<base::Value>(count)); return count; } @@ -680,8 +679,8 @@ const std::string& extension_id) { DCHECK(crx_file::id_util::IdIsValid(extension_id)); UpdateExtensionPref(extension_id, kPrefExternalAcknowledged, - new base::Value(true)); - UpdateExtensionPref(extension_id, kPrefAcknowledgePromptCount, NULL); + base::MakeUnique<base::Value>(true)); + UpdateExtensionPref(extension_id, kPrefAcknowledgePromptCount, nullptr); } bool ExtensionPrefs::IsBlacklistedExtensionAcknowledged( @@ -693,8 +692,8 @@ const std::string& extension_id) { DCHECK(crx_file::id_util::IdIsValid(extension_id)); UpdateExtensionPref(extension_id, kPrefBlacklistAcknowledged, - new base::Value(true)); - UpdateExtensionPref(extension_id, kPrefAcknowledgePromptCount, NULL); + base::MakeUnique<base::Value>(true)); + UpdateExtensionPref(extension_id, kPrefAcknowledgePromptCount, nullptr); } bool ExtensionPrefs::IsExternalInstallFirstRun( @@ -706,7 +705,7 @@ const std::string& extension_id) { DCHECK(crx_file::id_util::IdIsValid(extension_id)); UpdateExtensionPref(extension_id, kPrefExternalInstallFirstRun, - new base::Value(true)); + base::MakeUnique<base::Value>(true)); } bool ExtensionPrefs::SetAlertSystemFirstRun() { @@ -791,10 +790,10 @@ return; if (new_value == Extension::DISABLE_NONE) { - UpdateExtensionPref(extension_id, kPrefDisableReasons, NULL); + UpdateExtensionPref(extension_id, kPrefDisableReasons, nullptr); } else { UpdateExtensionPref(extension_id, kPrefDisableReasons, - new base::Value(new_value)); + base::MakeUnique<base::Value>(new_value)); } for (auto& observer : observer_list_) @@ -832,12 +831,13 @@ // Always make sure the "acknowledged" bit is cleared since the blacklist bit // is changing. 
- UpdateExtensionPref(extension_id, kPrefBlacklistAcknowledged, NULL); + UpdateExtensionPref(extension_id, kPrefBlacklistAcknowledged, nullptr); if (is_blacklisted) { - UpdateExtensionPref(extension_id, kPrefBlacklist, new base::Value(true)); + UpdateExtensionPref(extension_id, kPrefBlacklist, + base::MakeUnique<base::Value>(true)); } else { - UpdateExtensionPref(extension_id, kPrefBlacklist, NULL); + UpdateExtensionPref(extension_id, kPrefBlacklist, nullptr); const base::DictionaryValue* dict = GetExtensionPref(extension_id); if (dict && dict->empty()) DeleteExtensionPrefs(extension_id); @@ -940,7 +940,8 @@ void ExtensionPrefs::SetActiveBit(const std::string& extension_id, bool active) { - UpdateExtensionPref(extension_id, kActiveBit, new base::Value(active)); + UpdateExtensionPref(extension_id, kActiveBit, + base::MakeUnique<base::Value>(active)); } std::unique_ptr<const PermissionSet> ExtensionPrefs::GetGrantedPermissions( @@ -990,8 +991,8 @@ void ExtensionPrefs::SetExtensionRunning(const std::string& extension_id, bool is_running) { - base::Value* value = new base::Value(is_running); - UpdateExtensionPref(extension_id, kPrefRunning, value); + UpdateExtensionPref(extension_id, kPrefRunning, + base::MakeUnique<base::Value>(is_running)); } bool ExtensionPrefs::IsExtensionRunning(const std::string& extension_id) const { @@ -1005,8 +1006,8 @@ void ExtensionPrefs::SetIsActive(const std::string& extension_id, bool is_active) { - base::Value* value = new base::Value(is_active); - UpdateExtensionPref(extension_id, kIsActive, value); + UpdateExtensionPref(extension_id, kIsActive, + base::MakeUnique<base::Value>(is_active)); } bool ExtensionPrefs::IsActive(const std::string& extension_id) const { @@ -1025,7 +1026,7 @@ void ExtensionPrefs::SetIsIncognitoEnabled(const std::string& extension_id, bool enabled) { UpdateExtensionPref(extension_id, kPrefIncognitoEnabled, - new base::Value(enabled)); + base::MakeUnique<base::Value>(enabled)); 
extension_pref_value_map_->SetExtensionIncognitoState(extension_id, enabled); } @@ -1036,7 +1037,7 @@ void ExtensionPrefs::SetAllowFileAccess(const std::string& extension_id, bool allow) { UpdateExtensionPref(extension_id, kPrefAllowFileAccess, - new base::Value(allow)); + base::MakeUnique<base::Value>(allow)); } bool ExtensionPrefs::HasAllowFileAccessSetting( @@ -1110,9 +1111,9 @@ // true, which signifies that the registry key was deleted or the pref file // no longer lists the extension). if (!external_uninstall && Manifest::IsExternalLocation(location)) { - UpdateExtensionPref( - extension_id, kPrefState, - new base::Value(Extension::EXTERNAL_EXTENSION_UNINSTALLED)); + UpdateExtensionPref(extension_id, kPrefState, + base::MakeUnique<base::Value>( + Extension::EXTERNAL_EXTENSION_UNINSTALLED)); extension_pref_value_map_->SetExtensionState(extension_id, false); for (auto& observer : observer_list_) observer.OnExtensionStateChanged(extension_id, false); @@ -1123,7 +1124,7 @@ void ExtensionPrefs::SetExtensionEnabled(const std::string& extension_id) { UpdateExtensionPref(extension_id, kPrefState, - new base::Value(Extension::ENABLED)); + base::MakeUnique<base::Value>(Extension::ENABLED)); extension_pref_value_map_->SetExtensionState(extension_id, true); UpdateExtensionPref(extension_id, kPrefDisableReasons, nullptr); for (auto& observer : observer_list_) @@ -1134,11 +1135,11 @@ int disable_reasons) { if (!IsExternalExtensionUninstalled(extension_id)) { UpdateExtensionPref(extension_id, kPrefState, - new base::Value(Extension::DISABLED)); + base::MakeUnique<base::Value>(Extension::DISABLED)); extension_pref_value_map_->SetExtensionState(extension_id, false); } UpdateExtensionPref(extension_id, kPrefDisableReasons, - new base::Value(disable_reasons)); + base::MakeUnique<base::Value>(disable_reasons)); for (auto& observer : observer_list_) observer.OnExtensionStateChanged(extension_id, false); } @@ -1147,7 +1148,7 @@ BlacklistState state) { 
SetExtensionBlacklisted(extension_id, state == BLACKLISTED_MALWARE); UpdateExtensionPref(extension_id, kPrefBlacklistState, - new base::Value(state)); + base::MakeUnique<base::Value>(state)); } BlacklistState ExtensionPrefs::GetExtensionBlacklistState( @@ -1186,7 +1187,7 @@ !extension->manifest()->value()->Equals(old_manifest); if (update_required) { UpdateExtensionPref(extension->id(), kPrefManifest, - extension->manifest()->value()->DeepCopy()); + extension->manifest()->value()->CreateDeepCopy()); } } } @@ -1302,13 +1303,10 @@ DelayReason delay_reason, const syncer::StringOrdinal& page_ordinal, const std::string& install_parameter) { - base::DictionaryValue* extension_dict = new base::DictionaryValue(); - PopulateExtensionInfoPrefs(extension, - time_provider_->GetCurrentTime(), - initial_state, - install_flags, - install_parameter, - extension_dict); + auto extension_dict = base::MakeUnique<base::DictionaryValue>(); + PopulateExtensionInfoPrefs(extension, time_provider_->GetCurrentTime(), + initial_state, install_flags, install_parameter, + extension_dict.get()); // Add transient data that is needed by FinishDelayedInstallInfo(), but // should not be in the final extension prefs. 
All entries here should have @@ -1322,7 +1320,8 @@ extension_dict->SetInteger(kDelayedInstallReason, static_cast<int>(delay_reason)); - UpdateExtensionPref(extension->id(), kDelayedInstallInfo, extension_dict); + UpdateExtensionPref(extension->id(), kDelayedInstallInfo, + std::move(extension_dict)); } bool ExtensionPrefs::RemoveDelayedInstallInfo( @@ -1431,7 +1430,8 @@ } void ExtensionPrefs::SetAppDraggedByUser(const std::string& extension_id) { - UpdateExtensionPref(extension_id, kPrefUserDraggedApp, new base::Value(true)); + UpdateExtensionPref(extension_id, kPrefUserDraggedApp, + base::MakeUnique<base::Value>(true)); } bool ExtensionPrefs::IsFromWebStore( @@ -1662,7 +1662,7 @@ void ExtensionPrefs::SetGeometryCache( const std::string& extension_id, std::unique_ptr<base::DictionaryValue> cache) { - UpdateExtensionPref(extension_id, kPrefGeometryCache, cache.release()); + UpdateExtensionPref(extension_id, kPrefGeometryCache, std::move(cache)); } const base::DictionaryValue* ExtensionPrefs::GetInstallSignature() const { @@ -1694,7 +1694,7 @@ void ExtensionPrefs::SetInstallParam(const std::string& extension_id, const std::string& install_parameter) { UpdateExtensionPref(extension_id, kPrefInstallParam, - new base::Value(install_parameter)); + base::MakeUnique<base::Value>(install_parameter)); } int ExtensionPrefs::GetCorruptedDisableCount() const { @@ -1712,8 +1712,9 @@ void ExtensionPrefs::SetNeedsSync(const std::string& extension_id, bool needs_sync) { - UpdateExtensionPref(extension_id, kPrefNeedsSync, - needs_sync ? new base::Value(true) : nullptr); + UpdateExtensionPref( + extension_id, kPrefNeedsSync, + needs_sync ? base::MakeUnique<base::Value>(true) : nullptr); } ExtensionPrefs::ExtensionPrefs(
diff --git a/extensions/browser/extension_prefs.h b/extensions/browser/extension_prefs.h index 226db3e..9a3d98a 100644 --- a/extensions/browser/extension_prefs.h +++ b/extensions/browser/extension_prefs.h
@@ -226,7 +226,7 @@ // ExtensionScopedPrefs methods: void UpdateExtensionPref(const std::string& id, const std::string& key, - base::Value* value) override; + std::unique_ptr<base::Value> value) override; void DeleteExtensionPrefs(const std::string& id) override;
diff --git a/extensions/browser/extension_scoped_prefs.h b/extensions/browser/extension_scoped_prefs.h index 15fc577..f814db7 100644 --- a/extensions/browser/extension_scoped_prefs.h +++ b/extensions/browser/extension_scoped_prefs.h
@@ -5,6 +5,15 @@ #ifndef EXTENSIONS_BROWSER_EXTENSION_SCOPED_PREFS_H_ #define EXTENSIONS_BROWSER_EXTENSION_SCOPED_PREFS_H_ +#include <memory> +#include <string> + +namespace base { +class DictionaryValue; +class ListValue; +class Value; +} + namespace extensions { class ExtensionScopedPrefs { @@ -15,7 +24,7 @@ // Sets the pref |key| for extension |id| to |value|. virtual void UpdateExtensionPref(const std::string& id, const std::string& key, - base::Value* value) = 0; + std::unique_ptr<base::Value> value) = 0; // Deletes the pref dictionary for extension |id|. virtual void DeleteExtensionPrefs(const std::string& id) = 0;
diff --git a/extensions/common/event_filtering_info.cc b/extensions/common/event_filtering_info.cc index b8040e9..4770d9e 100644 --- a/extensions/common/event_filtering_info.cc +++ b/extensions/common/event_filtering_info.cc
@@ -12,15 +12,6 @@ namespace extensions { -namespace { - -const char kInstanceId[] = "instanceId"; -const char kServiceType[] = "serviceType"; -const char kWindowType[] = "windowType"; -const char kWindowExposedByDefault[] = "windowExposedByDefault"; - -} - EventFilteringInfo::EventFilteringInfo() : has_url_(false), has_instance_id_(false), @@ -28,24 +19,6 @@ has_window_type_(false), has_window_exposed_by_default_(false) {} -EventFilteringInfo::EventFilteringInfo(const base::DictionaryValue& dict) - : EventFilteringInfo() { - std::string url; - if (dict.GetString("url", &url)) { - GURL maybe_url(url); - if (maybe_url.is_valid()) { - has_url_ = true; - url_.Swap(&maybe_url); - } - } - - has_instance_id_ = dict.GetInteger(kInstanceId, &instance_id_); - dict.GetString(kServiceType, &service_type_); - has_window_type_ = dict.GetString(kWindowType, &window_type_); - has_window_exposed_by_default_ = - dict.GetBoolean(kWindowExposedByDefault, &window_exposed_by_default_); -} - EventFilteringInfo::EventFilteringInfo(const EventFilteringInfo& other) = default; @@ -78,16 +51,16 @@ result->SetString("url", url_.spec()); if (has_instance_id_) - result->SetInteger(kInstanceId, instance_id_); + result->SetInteger("instanceId", instance_id_); if (!service_type_.empty()) - result->SetString(kServiceType, service_type_); + result->SetString("serviceType", service_type_); if (has_window_type_) - result->SetString(kWindowType, window_type_); + result->SetString("windowType", window_type_); if (has_window_exposed_by_default_) - result->SetBoolean(kWindowExposedByDefault, window_exposed_by_default_); + result->SetBoolean("windowExposedByDefault", window_exposed_by_default_); return result; }
diff --git a/extensions/common/event_filtering_info.h b/extensions/common/event_filtering_info.h index ea7596ca..e6b7f9f 100644 --- a/extensions/common/event_filtering_info.h +++ b/extensions/common/event_filtering_info.h
@@ -26,7 +26,6 @@ class EventFilteringInfo { public: EventFilteringInfo(); - explicit EventFilteringInfo(const base::DictionaryValue& dict); EventFilteringInfo(const EventFilteringInfo& other); ~EventFilteringInfo(); void SetWindowExposedByDefault(bool exposed);
diff --git a/extensions/renderer/BUILD.gn b/extensions/renderer/BUILD.gn index aa9371b..d5bcad0b 100644 --- a/extensions/renderer/BUILD.gn +++ b/extensions/renderer/BUILD.gn
@@ -32,8 +32,6 @@ "api_definitions_natives.h", "api_event_handler.cc", "api_event_handler.h", - "api_event_listeners.cc", - "api_event_listeners.h", "api_last_error.cc", "api_last_error.h", "api_request_handler.cc", @@ -297,7 +295,6 @@ "api_bindings_system_unittest.cc", "api_bindings_system_unittest.h", "api_event_handler_unittest.cc", - "api_event_listeners_unittest.cc", "api_last_error_unittest.cc", "api_request_handler_unittest.cc", "api_test_base.cc",
diff --git a/extensions/renderer/api_binding.cc b/extensions/renderer/api_binding.cc index 648e8a727..e16bef1 100644 --- a/extensions/renderer/api_binding.cc +++ b/extensions/renderer/api_binding.cc
@@ -90,19 +90,15 @@ struct APIBinding::EventData { EventData(std::string exposed_name, std::string full_name, - bool supports_filters, APIEventHandler* event_handler) : exposed_name(std::move(exposed_name)), full_name(std::move(full_name)), - supports_filters(supports_filters), event_handler(event_handler) {} // The name of the event on the API object (e.g. onCreated). std::string exposed_name; // The fully-specified name of the event (e.g. tabs.onCreated). std::string full_name; - // Whether the event supports filters. - bool supports_filters; // The associated event handler. This raw pointer is safe because the // EventData is only accessed from the callbacks associated with the // APIBinding, and both the APIBinding and APIEventHandler are owned by the @@ -218,12 +214,8 @@ CHECK(event_dict->GetString("name", &name)); std::string full_name = base::StringPrintf("%s.%s", api_name_.c_str(), name.c_str()); - const base::ListValue* filters = nullptr; - bool supports_filters = - event_dict->GetList("filters", &filters) && !filters->empty(); - events_.push_back( - base::MakeUnique<EventData>(std::move(name), std::move(full_name), - supports_filters, event_handler)); + events_.push_back(base::MakeUnique<EventData>( + std::move(name), std::move(full_name), event_handler)); } } } @@ -383,7 +375,7 @@ auto* event_data = static_cast<EventData*>(info.Data().As<v8::External>()->Value()); info.GetReturnValue().Set(event_data->event_handler->CreateEventInstance( - event_data->full_name, event_data->supports_filters, context)); + event_data->full_name, context)); } void APIBinding::GetCustomPropertyObject(
diff --git a/extensions/renderer/api_binding_js_util.cc b/extensions/renderer/api_binding_js_util.cc index 84b59663..d554bb54 100644 --- a/extensions/renderer/api_binding_js_util.cc +++ b/extensions/renderer/api_binding_js_util.cc
@@ -75,10 +75,11 @@ event_handler_->RegisterArgumentMassager(context, event_name, massager); } -void APIBindingJSUtil::CreateCustomEvent(gin::Arguments* arguments, - v8::Local<v8::Value> v8_event_name, - v8::Local<v8::Value> unused_schema, - bool supports_filters) { +void APIBindingJSUtil::CreateCustomEvent( + gin::Arguments* arguments, + v8::Local<v8::Value> v8_event_name, + v8::Local<v8::Value> unused_schema, + v8::Local<v8::Value> unused_event_options) { v8::Isolate* isolate = arguments->isolate(); v8::HandleScope handle_scope(isolate); v8::Local<v8::Object> holder; @@ -94,16 +95,11 @@ event_name = gin::V8ToString(v8_event_name); } - DCHECK(!supports_filters || !event_name.empty()) - << "Events that support filters cannot be anonymous."; - v8::Local<v8::Value> event; - if (event_name.empty()) { + if (event_name.empty()) event = event_handler_->CreateAnonymousEventInstance(context); - } else { - event = event_handler_->CreateEventInstance(event_name, supports_filters, - context); - } + else + event = event_handler_->CreateEventInstance(event_name, context); arguments->Return(event); }
diff --git a/extensions/renderer/api_binding_js_util.h b/extensions/renderer/api_binding_js_util.h index a02b07e..00198069 100644 --- a/extensions/renderer/api_binding_js_util.h +++ b/extensions/renderer/api_binding_js_util.h
@@ -51,14 +51,12 @@ // A handler to allow custom bindings to create custom extension API event // objects (e.g. foo.onBar). - // Note: The JS version allows for constructing declarative events; it's - // unclear if we'll need to support this. - // TODO(devlin): Currently, we ignore schema. We may want to take it into - // account. + // TODO(devlin): Currently, we ignore schema and options. We'll need to take + // at least options into account. void CreateCustomEvent(gin::Arguments* arguments, v8::Local<v8::Value> v8_event_name, v8::Local<v8::Value> unused_schema, - bool supports_filters); + v8::Local<v8::Value> unused_event_options); // Invalidates an event, removing its listeners and preventing any more from // being added.
diff --git a/extensions/renderer/api_binding_unittest.cc b/extensions/renderer/api_binding_unittest.cc index a86e593..63a8942 100644 --- a/extensions/renderer/api_binding_unittest.cc +++ b/extensions/renderer/api_binding_unittest.cc
@@ -122,7 +122,6 @@ void OnEventListenersChanged(const std::string& event_name, binding::EventListenersChanged change, - const base::DictionaryValue* filter, v8::Local<v8::Context> context) {} } // namespace
diff --git a/extensions/renderer/api_bindings_system.cc b/extensions/renderer/api_bindings_system.cc index 64ba5864..116d0cc5 100644 --- a/extensions/renderer/api_bindings_system.cc +++ b/extensions/renderer/api_bindings_system.cc
@@ -103,9 +103,8 @@ void APIBindingsSystem::FireEventInContext(const std::string& event_name, v8::Local<v8::Context> context, - const base::ListValue& response, - const EventFilteringInfo& filter) { - event_handler_.FireEventInContext(event_name, context, response, filter); + const base::ListValue& response) { + event_handler_.FireEventInContext(event_name, context, response); } APIBindingHooks* APIBindingsSystem::GetHooksForAPI(
diff --git a/extensions/renderer/api_bindings_system.h b/extensions/renderer/api_bindings_system.h index 8b9e86d5..157fa76 100644 --- a/extensions/renderer/api_bindings_system.h +++ b/extensions/renderer/api_bindings_system.h
@@ -67,8 +67,7 @@ // listeners. void FireEventInContext(const std::string& event_name, v8::Local<v8::Context> context, - const base::ListValue& response, - const EventFilteringInfo& filter); + const base::ListValue& response); // Returns the APIBindingHooks object for the given api to allow for // registering custom hooks. These must be registered *before* the
diff --git a/extensions/renderer/api_bindings_system_unittest.cc b/extensions/renderer/api_bindings_system_unittest.cc index ac5729a..bac9580 100644 --- a/extensions/renderer/api_bindings_system_unittest.cc +++ b/extensions/renderer/api_bindings_system_unittest.cc
@@ -10,7 +10,6 @@ #include "base/stl_util.h" #include "base/strings/stringprintf.h" #include "base/values.h" -#include "extensions/common/event_filtering_info.h" #include "extensions/common/extension_api.h" #include "extensions/renderer/api_binding.h" #include "extensions/renderer/api_binding_hooks.h" @@ -159,7 +158,6 @@ void APIBindingsSystemTest::OnEventListenersChanged( const std::string& event_name, binding::EventListenersChanged changed, - const base::DictionaryValue* filter, v8::Local<v8::Context> context) {} void APIBindingsSystemTest::ValidateLastRequest( @@ -269,7 +267,7 @@ std::unique_ptr<base::ListValue> expected_args = ListValueFromString(kResponseArgsJson); bindings_system()->FireEventInContext("alpha.alphaEvent", context, - *expected_args, EventFilteringInfo()); + *expected_args); EXPECT_EQ(ReplaceSingleQuotes(kResponseArgsJson), GetStringPropertyFromObject(context->Global(), context,
diff --git a/extensions/renderer/api_bindings_system_unittest.h b/extensions/renderer/api_bindings_system_unittest.h index f635d79..f79bdef0 100644 --- a/extensions/renderer/api_bindings_system_unittest.h +++ b/extensions/renderer/api_bindings_system_unittest.h
@@ -57,7 +57,6 @@ // Callback for event listeners changing. void OnEventListenersChanged(const std::string& event_name, binding::EventListenersChanged changed, - const base::DictionaryValue* filter, v8::Local<v8::Context> context); // Callback for an API request being made. Stores the request in
diff --git a/extensions/renderer/api_event_handler.cc b/extensions/renderer/api_event_handler.cc index f3b4c847..33de4ff 100644 --- a/extensions/renderer/api_event_handler.cc +++ b/extensions/renderer/api_event_handler.cc
@@ -15,7 +15,6 @@ #include "base/supports_user_data.h" #include "base/values.h" #include "content/public/child/v8_value_converter.h" -#include "extensions/renderer/api_event_listeners.h" #include "extensions/renderer/event_emitter.h" #include "gin/handle.h" #include "gin/per_context_data.h" @@ -25,7 +24,6 @@ namespace { void DoNothingOnListenersChanged(binding::EventListenersChanged change, - const base::DictionaryValue* filter, v8::Local<v8::Context> context) {} const char kExtensionAPIEventPerContextKey[] = "extension_api_events"; @@ -100,7 +98,7 @@ gin::Converter<EventEmitter*>::FromV8(isolate, v8_emitter.Get(isolate), &emitter); CHECK(emitter); - emitter->Fire(context, &args, nullptr); + emitter->Fire(context, &args); } } // namespace @@ -113,7 +111,6 @@ v8::Local<v8::Object> APIEventHandler::CreateEventInstance( const std::string& event_name, - bool supports_filters, v8::Local<v8::Context> context) { // We need a context scope since gin::CreateHandle only takes the isolate // and infers the context from that. 
@@ -124,19 +121,9 @@ APIEventPerContextData* data = GetContextData(context, true); DCHECK(data->emitters.find(event_name) == data->emitters.end()); - APIEventListeners::ListenersUpdated updated = - base::Bind(listeners_changed_, event_name); - std::unique_ptr<APIEventListeners> listeners; - if (supports_filters) { - listeners = base::MakeUnique<FilteredEventListeners>(updated, event_name, - &event_filter_); - } else { - listeners = base::MakeUnique<UnfilteredEventListeners>(updated); - } - gin::Handle<EventEmitter> emitter_handle = gin::CreateHandle( context->GetIsolate(), - new EventEmitter(supports_filters, std::move(listeners), call_js_)); + new EventEmitter(call_js_, base::Bind(listeners_changed_, event_name))); CHECK(!emitter_handle.IsEmpty()); v8::Local<v8::Value> emitter_value = emitter_handle.ToV8(); CHECK(emitter_value->IsObject()); @@ -152,13 +139,9 @@ v8::Local<v8::Context> context) { v8::Context::Scope context_scope(context); APIEventPerContextData* data = GetContextData(context, true); - bool supports_filters = false; - std::unique_ptr<APIEventListeners> listeners = - base::MakeUnique<UnfilteredEventListeners>( - base::Bind(&DoNothingOnListenersChanged)); gin::Handle<EventEmitter> emitter_handle = gin::CreateHandle( context->GetIsolate(), - new EventEmitter(supports_filters, std::move(listeners), call_js_)); + new EventEmitter(call_js_, base::Bind(&DoNothingOnListenersChanged))); CHECK(!emitter_handle.IsEmpty()); v8::Local<v8::Object> emitter_object = emitter_handle.ToV8().As<v8::Object>(); data->anonymous_emitters.push_back( @@ -176,7 +159,7 @@ return; } - emitter->Invalidate(context); + emitter->Invalidate(); auto emitter_entry = std::find(data->anonymous_emitters.begin(), data->anonymous_emitters.end(), event); if (emitter_entry == data->anonymous_emitters.end()) { @@ -189,8 +172,7 @@ void APIEventHandler::FireEventInContext(const std::string& event_name, v8::Local<v8::Context> context, - const base::ListValue& args, - const EventFilteringInfo& 
filter) { + const base::ListValue& args) { APIEventPerContextData* data = GetContextData(context, false); if (!data) return; @@ -205,7 +187,7 @@ &emitter); CHECK(emitter); - if (emitter->GetNumListeners() == 0u) + if (emitter->listeners()->empty()) return; // Note: since we only convert the arguments once, if a listener modifies an @@ -220,7 +202,7 @@ v8_args.reserve(args.GetSize()); for (const auto& arg : args) v8_args.push_back(converter->ToV8Value(arg.get(), context)); - emitter->Fire(context, &v8_args, &filter); + emitter->Fire(context, &v8_args); } else { v8::Isolate* isolate = context->GetIsolate(); v8::HandleScope handle_scope(isolate); @@ -271,14 +253,17 @@ gin::Converter<EventEmitter*>::FromV8(isolate, pair.second.Get(isolate), &emitter); CHECK(emitter); - emitter->Invalidate(context); + emitter->Invalidate(); + // When the context is shut down, all listeners are removed. + listeners_changed_.Run( + pair.first, binding::EventListenersChanged::NO_LISTENERS, context); } for (const auto& global : data->anonymous_emitters) { EventEmitter* emitter = nullptr; gin::Converter<EventEmitter*>::FromV8(isolate, global.Get(isolate), &emitter); CHECK(emitter); - emitter->Invalidate(context); + emitter->Invalidate(); } data->emitters.clear(); @@ -303,7 +288,7 @@ gin::Converter<EventEmitter*>::FromV8( context->GetIsolate(), iter->second.Get(context->GetIsolate()), &emitter); CHECK(emitter); - return emitter->GetNumListeners(); + return emitter->listeners()->size(); } } // namespace extensions
diff --git a/extensions/renderer/api_event_handler.h b/extensions/renderer/api_event_handler.h index 33e6d8a..a0ba364 100644 --- a/extensions/renderer/api_event_handler.h +++ b/extensions/renderer/api_event_handler.h
@@ -9,7 +9,6 @@ #include "base/callback.h" #include "base/macros.h" -#include "extensions/common/event_filter.h" #include "extensions/renderer/api_binding_types.h" #include "extensions/renderer/event_emitter.h" #include "v8/include/v8.h" @@ -19,7 +18,6 @@ } namespace extensions { -class EventFilteringInfo; // The object to handle API events. This includes vending v8::Objects for the // event; handling adding, removing, and querying listeners; and firing events @@ -30,7 +28,6 @@ using EventListenersChangedMethod = base::Callback<void(const std::string& event_name, binding::EventListenersChanged, - const base::DictionaryValue* filter, v8::Local<v8::Context>)>; APIEventHandler(const binding::RunJSFunction& call_js, @@ -39,7 +36,6 @@ // Returns a new v8::Object for an event with the given |event_name|. v8::Local<v8::Object> CreateEventInstance(const std::string& event_name, - bool supports_filters, v8::Local<v8::Context> context); // Creates a new event without any name. This is used by custom bindings when @@ -57,8 +53,7 @@ // specified |context|, sending the included |arguments|. void FireEventInContext(const std::string& event_name, v8::Local<v8::Context> context, - const base::ListValue& arguments, - const EventFilteringInfo& filter); + const base::ListValue& arguments); // Registers a |function| to serve as an "argument massager" for the given // |event_name|, mutating the original arguments. @@ -85,9 +80,6 @@ EventListenersChangedMethod listeners_changed_; - // The associated EventFilter; shared across all contexts and events. - EventFilter event_filter_; - DISALLOW_COPY_AND_ASSIGN(APIEventHandler); };
diff --git a/extensions/renderer/api_event_handler_unittest.cc b/extensions/renderer/api_event_handler_unittest.cc index 3230ebb..3a376b4 100644 --- a/extensions/renderer/api_event_handler_unittest.cc +++ b/extensions/renderer/api_event_handler_unittest.cc
@@ -10,7 +10,6 @@ #include "base/optional.h" #include "base/test/mock_callback.h" #include "base/values.h" -#include "extensions/common/event_filtering_info.h" #include "extensions/renderer/api_binding_test.h" #include "extensions/renderer/api_binding_test_util.h" #include "gin/arguments.h" @@ -27,7 +26,6 @@ void DoNothingOnEventListenersChanged(const std::string& event_name, binding::EventListenersChanged change, - const base::DictionaryValue* value, v8::Local<v8::Context> context) {} class APIEventHandlerTest : public APIBindingTest { @@ -75,7 +73,7 @@ v8::Local<v8::Context> context = MainContext(); v8::Local<v8::Object> event = - handler()->CreateEventInstance(kEventName, false, context); + handler()->CreateEventInstance(kEventName, context); ASSERT_FALSE(event.IsEmpty()); EXPECT_EQ(0u, handler()->GetNumEventListenersForTesting(kEventName, context)); @@ -174,9 +172,9 @@ v8::Local<v8::Context> context = MainContext(); v8::Local<v8::Object> alpha_event = - handler()->CreateEventInstance(kAlphaName, false, context); + handler()->CreateEventInstance(kAlphaName, context); v8::Local<v8::Object> beta_event = - handler()->CreateEventInstance(kBetaName, false, context); + handler()->CreateEventInstance(kBetaName, context); ASSERT_FALSE(alpha_event.IsEmpty()); ASSERT_FALSE(beta_event.IsEmpty()); @@ -243,8 +241,7 @@ EXPECT_EQ(0, get_fired_count("alphaCount2")); EXPECT_EQ(0, get_fired_count("betaCount")); - handler()->FireEventInContext(kAlphaName, context, base::ListValue(), - EventFilteringInfo()); + handler()->FireEventInContext(kAlphaName, context, base::ListValue()); EXPECT_EQ(2u, handler()->GetNumEventListenersForTesting(kAlphaName, context)); EXPECT_EQ(1u, handler()->GetNumEventListenersForTesting(kBetaName, context)); @@ -252,14 +249,12 @@ EXPECT_EQ(1, get_fired_count("alphaCount2")); EXPECT_EQ(0, get_fired_count("betaCount")); - handler()->FireEventInContext(kAlphaName, context, base::ListValue(), - EventFilteringInfo()); + handler()->FireEventInContext(kAlphaName, 
context, base::ListValue()); EXPECT_EQ(2, get_fired_count("alphaCount1")); EXPECT_EQ(2, get_fired_count("alphaCount2")); EXPECT_EQ(0, get_fired_count("betaCount")); - handler()->FireEventInContext(kBetaName, context, base::ListValue(), - EventFilteringInfo()); + handler()->FireEventInContext(kBetaName, context, base::ListValue()); EXPECT_EQ(2, get_fired_count("alphaCount1")); EXPECT_EQ(2, get_fired_count("alphaCount2")); EXPECT_EQ(1, get_fired_count("betaCount")); @@ -272,7 +267,7 @@ const char kEventName[] = "alpha"; v8::Local<v8::Object> event = - handler()->CreateEventInstance(kEventName, false, context); + handler()->CreateEventInstance(kEventName, context); ASSERT_FALSE(event.IsEmpty()); const char kListenerFunction[] = @@ -293,8 +288,7 @@ const char kArguments[] = "['foo',1,{'prop1':'bar'}]"; std::unique_ptr<base::ListValue> event_args = ListValueFromString(kArguments); ASSERT_TRUE(event_args); - handler()->FireEventInContext(kEventName, context, *event_args, - EventFilteringInfo()); + handler()->FireEventInContext(kEventName, context, *event_args); EXPECT_EQ( ReplaceSingleQuotes(kArguments), @@ -320,10 +314,10 @@ // Create two instances of the same event in different contexts. v8::Local<v8::Object> event_a = - handler()->CreateEventInstance(kEventName, false, context_a); + handler()->CreateEventInstance(kEventName, context_a); ASSERT_FALSE(event_a.IsEmpty()); v8::Local<v8::Object> event_b = - handler()->CreateEventInstance(kEventName, false, context_b); + handler()->CreateEventInstance(kEventName, context_b); ASSERT_FALSE(event_b.IsEmpty()); // Add two separate listeners to the event, one in each context. 
@@ -357,8 +351,7 @@ ListValueFromString("['result_a:']"); ASSERT_TRUE(arguments_a); - handler()->FireEventInContext(kEventName, context_a, *arguments_a, - EventFilteringInfo()); + handler()->FireEventInContext(kEventName, context_a, *arguments_a); { EXPECT_EQ("\"result_a:alpha\"", GetStringPropertyFromObject(context_a->Global(), context_a, @@ -374,8 +367,7 @@ std::unique_ptr<base::ListValue> arguments_b = ListValueFromString("['result_b:']"); ASSERT_TRUE(arguments_b); - handler()->FireEventInContext(kEventName, context_b, *arguments_b, - EventFilteringInfo()); + handler()->FireEventInContext(kEventName, context_b, *arguments_b); { EXPECT_EQ("\"result_a:alpha\"", GetStringPropertyFromObject(context_a->Global(), context_a, @@ -394,7 +386,7 @@ const char kEventName[] = "alpha"; v8::Local<v8::Object> event = - handler()->CreateEventInstance(kEventName, false, context); + handler()->CreateEventInstance(kEventName, context); ASSERT_FALSE(event.IsEmpty()); const char kAddListenerOnNull[] = @@ -446,7 +438,7 @@ v8::Local<v8::Context> context = MainContext(); v8::Local<v8::Object> event = - handler()->CreateEventInstance("alpha", false, context); + handler()->CreateEventInstance("alpha", context); ASSERT_FALSE(event.IsEmpty()); const char kListenerFunction[] = @@ -487,7 +479,7 @@ const char kEventName[] = "alpha"; v8::Local<v8::Object> event = - handler()->CreateEventInstance(kEventName, false, context); + handler()->CreateEventInstance(kEventName, context); ASSERT_FALSE(event.IsEmpty()); { // Cache the event object on the global in order to allow for easy removal. @@ -529,8 +521,7 @@ // Fire the event. All listeners should be removed (and we shouldn't crash). 
EXPECT_EQ(kNumListeners, handler()->GetNumEventListenersForTesting(kEventName, context)); - handler()->FireEventInContext(kEventName, context, base::ListValue(), - EventFilteringInfo()); + handler()->FireEventInContext(kEventName, context, base::ListValue()); EXPECT_EQ(0u, handler()->GetNumEventListenersForTesting(kEventName, context)); // TODO(devlin): Another possible test: register listener a and listener b, @@ -562,7 +553,7 @@ const char kEventName[] = "alpha"; v8::Local<v8::Object> event = - handler()->CreateEventInstance(kEventName, false, context); + handler()->CreateEventInstance(kEventName, context); ASSERT_FALSE(event.IsEmpty()); bool did_throw = false; @@ -610,8 +601,7 @@ std::unique_ptr<base::ListValue> event_args = ListValueFromString("[42]"); ASSERT_TRUE(event_args); - handler()->FireEventInContext(kEventName, context, *event_args, - EventFilteringInfo()); + handler()->FireEventInContext(kEventName, context, *event_args); // An exception should have been thrown by the first listener and the second // listener should have recorded the event arguments. 
@@ -636,13 +626,13 @@ const char kEventName1[] = "onFoo"; const char kEventName2[] = "onBar"; v8::Local<v8::Object> event1_a = - handler()->CreateEventInstance(kEventName1, false, context_a); + handler()->CreateEventInstance(kEventName1, context_a); ASSERT_FALSE(event1_a.IsEmpty()); v8::Local<v8::Object> event2_a = - handler()->CreateEventInstance(kEventName2, false, context_a); + handler()->CreateEventInstance(kEventName2, context_a); ASSERT_FALSE(event2_a.IsEmpty()); v8::Local<v8::Object> event1_b = - handler()->CreateEventInstance(kEventName1, false, context_b); + handler()->CreateEventInstance(kEventName1, context_b); ASSERT_FALSE(event1_b.IsEmpty()); const char kAddListenerFunction[] = @@ -659,7 +649,7 @@ { EXPECT_CALL(change_handler, Run(kEventName1, binding::EventListenersChanged::HAS_LISTENERS, - nullptr, context_a)) + context_a)) .Times(1); v8::Local<v8::Value> argv[] = {event1_a, listener1}; RunFunction(add_listener, context_a, arraysize(argv), argv); @@ -696,7 +686,7 @@ { EXPECT_CALL(change_handler, Run(kEventName1, binding::EventListenersChanged::NO_LISTENERS, - nullptr, context_a)) + context_a)) .Times(1); v8::Local<v8::Value> argv[] = {event1_a, listener2}; RunFunction(remove_listener, context_a, arraysize(argv), argv); @@ -712,7 +702,7 @@ { EXPECT_CALL(change_handler, Run(kEventName2, binding::EventListenersChanged::HAS_LISTENERS, - nullptr, context_a)) + context_a)) .Times(1); v8::Local<v8::Value> argv[] = {event2_a, listener3}; RunFunction(add_listener, context_a, arraysize(argv), argv); @@ -724,7 +714,7 @@ { EXPECT_CALL(change_handler, Run(kEventName1, binding::EventListenersChanged::HAS_LISTENERS, - nullptr, context_b)) + context_b)) .Times(1); // And add a listener to an event in a different context to make sure the // associated context is correct. @@ -741,16 +731,20 @@ // When the contexts are invalidated, we should receive listener removed // notifications. 
- EXPECT_CALL(change_handler, - Run(kEventName2, binding::EventListenersChanged::NO_LISTENERS, - nullptr, context_a)) + EXPECT_CALL( + change_handler, + Run(kEventName1, binding::EventListenersChanged::NO_LISTENERS, context_a)) + .Times(1); + EXPECT_CALL( + change_handler, + Run(kEventName2, binding::EventListenersChanged::NO_LISTENERS, context_a)) .Times(1); DisposeContext(context_a); ::testing::Mock::VerifyAndClearExpectations(&change_handler); - EXPECT_CALL(change_handler, - Run(kEventName1, binding::EventListenersChanged::NO_LISTENERS, - nullptr, context_b)) + EXPECT_CALL( + change_handler, + Run(kEventName1, binding::EventListenersChanged::NO_LISTENERS, context_b)) .Times(1); DisposeContext(context_b); ::testing::Mock::VerifyAndClearExpectations(&change_handler); @@ -763,7 +757,7 @@ const char kEventName[] = "alpha"; v8::Local<v8::Object> event = - handler()->CreateEventInstance(kEventName, false, context); + handler()->CreateEventInstance(kEventName, context); ASSERT_FALSE(event.IsEmpty()); const char kArgumentMassager[] = @@ -793,8 +787,7 @@ const char kArguments[] = "['first','second']"; std::unique_ptr<base::ListValue> event_args = ListValueFromString(kArguments); ASSERT_TRUE(event_args); - handler()->FireEventInContext(kEventName, context, *event_args, - EventFilteringInfo()); + handler()->FireEventInContext(kEventName, context, *event_args); EXPECT_EQ( "[\"first\",\"second\"]", @@ -812,7 +805,7 @@ const char kEventName[] = "alpha"; v8::Local<v8::Object> event = - handler()->CreateEventInstance(kEventName, false, context); + handler()->CreateEventInstance(kEventName, context); ASSERT_FALSE(event.IsEmpty()); const char kArgumentMassager[] = @@ -842,8 +835,7 @@ const char kArguments[] = "['first','second']"; std::unique_ptr<base::ListValue> event_args = ListValueFromString(kArguments); ASSERT_TRUE(event_args); - handler()->FireEventInContext(kEventName, context, *event_args, - EventFilteringInfo()); + handler()->FireEventInContext(kEventName, context, 
*event_args); // The massager should have been triggered, but since it doesn't call // dispatch(), the listener shouldn't have been notified. @@ -876,7 +868,7 @@ const char kEventName[] = "alpha"; v8::Local<v8::Object> event = - handler()->CreateEventInstance(kEventName, false, context); + handler()->CreateEventInstance(kEventName, context); ASSERT_FALSE(event.IsEmpty()); // A massager that never dispatches. @@ -897,8 +889,7 @@ v8::Local<v8::Value> argv[] = {event, listener_function}; RunFunction(add_listener_function, context, arraysize(argv), argv); - handler()->FireEventInContext(kEventName, context, base::ListValue(), - EventFilteringInfo()); + handler()->FireEventInContext(kEventName, context, base::ListValue()); // Nothing should blow up. (We tested in the previous test that the event // isn't notified without calling dispatch, so all there is to test here is
diff --git a/extensions/renderer/api_event_listeners.cc b/extensions/renderer/api_event_listeners.cc deleted file mode 100644 index a59c9c6..0000000 --- a/extensions/renderer/api_event_listeners.cc +++ /dev/null
@@ -1,254 +0,0 @@ -// Copyright 2017 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "extensions/renderer/api_event_listeners.h" - -#include <algorithm> -#include <memory> - -#include "base/memory/ptr_util.h" -#include "content/public/child/v8_value_converter.h" -#include "extensions/common/event_filter.h" -#include "extensions/common/event_filtering_info.h" -#include "extensions/common/event_matcher.h" -#include "gin/converter.h" - -namespace extensions { - -namespace { - -// TODO(devlin): The EventFilter supports adding EventMatchers associated with -// an id. For now, we ignore it and add/return everything associated with this -// constant. We should rethink that. -const int kIgnoreRoutingId = 0; - -// Pseudo-validates the given |filter| and converts it into a -// base::DictionaryValue. Returns true on success. -// TODO(devlin): This "validation" is pretty terrible. It matches the JS -// equivalent, but it's lousy and makes it easy for users to get it wrong. -// We should generate an argument spec for it and match it exactly. 
-bool ValidateFilter(v8::Local<v8::Context> context, - v8::Local<v8::Object> filter, - std::unique_ptr<base::DictionaryValue>* filter_dict, - std::string* error) { - v8::Isolate* isolate = context->GetIsolate(); - v8::HandleScope handle_scope(isolate); - - if (filter.IsEmpty()) { - *filter_dict = base::MakeUnique<base::DictionaryValue>(); - return true; - } - - v8::Local<v8::Value> url_filter; - if (!filter->Get(context, gin::StringToSymbol(isolate, "url")) - .ToLocal(&url_filter)) { - return false; - } - - if (!url_filter->IsUndefined() && !url_filter->IsArray()) { - *error = "filters.url should be an array."; - return false; - } - - v8::Local<v8::Value> service_type; - if (!filter->Get(context, gin::StringToSymbol(isolate, "serviceType")) - .ToLocal(&service_type)) { - return false; - } - - if (!service_type->IsUndefined() && !service_type->IsString()) { - *error = "filters.serviceType should be a string."; - return false; - } - - std::unique_ptr<content::V8ValueConverter> converter( - content::V8ValueConverter::create()); - std::unique_ptr<base::Value> value = converter->FromV8Value(filter, context); - if (!value || !value->is_dict()) { - *error = "could not convert filter."; - return false; - } - - *filter_dict = base::DictionaryValue::From(std::move(value)); - return true; -} - -} // namespace - -UnfilteredEventListeners::UnfilteredEventListeners( - const ListenersUpdated& listeners_updated) - : listeners_updated_(listeners_updated) {} -UnfilteredEventListeners::~UnfilteredEventListeners() = default; - -bool UnfilteredEventListeners::AddListener(v8::Local<v8::Function> listener, - v8::Local<v8::Object> filter, - v8::Local<v8::Context> context, - std::string* error) { - // |filter| should be checked before getting here. 
- DCHECK(filter.IsEmpty()) - << "Filtered events should use FilteredEventListeners"; - - if (HasListener(listener)) - return false; - - listeners_.push_back( - v8::Global<v8::Function>(context->GetIsolate(), listener)); - if (listeners_.size() == 1) { - listeners_updated_.Run(binding::EventListenersChanged::HAS_LISTENERS, - nullptr, context); - } - - return true; -} - -void UnfilteredEventListeners::RemoveListener(v8::Local<v8::Function> listener, - v8::Local<v8::Context> context) { - auto iter = std::find(listeners_.begin(), listeners_.end(), listener); - if (iter == listeners_.end()) - return; - - listeners_.erase(iter); - if (listeners_.empty()) { - listeners_updated_.Run(binding::EventListenersChanged::NO_LISTENERS, - nullptr, context); - } -} - -bool UnfilteredEventListeners::HasListener(v8::Local<v8::Function> listener) { - return std::find(listeners_.begin(), listeners_.end(), listener) != - listeners_.end(); -} - -size_t UnfilteredEventListeners::GetNumListeners() { - return listeners_.size(); -} - -std::vector<v8::Local<v8::Function>> UnfilteredEventListeners::GetListeners( - const EventFilteringInfo* filter, - v8::Local<v8::Context> context) { - std::vector<v8::Local<v8::Function>> listeners; - listeners.reserve(listeners_.size()); - for (const auto& listener : listeners_) - listeners.push_back(listener.Get(context->GetIsolate())); - return listeners; -} - -void UnfilteredEventListeners::Invalidate(v8::Local<v8::Context> context) { - if (!listeners_.empty()) { - listeners_.clear(); - listeners_updated_.Run(binding::EventListenersChanged::NO_LISTENERS, - nullptr, context); - } -} - -struct FilteredEventListeners::ListenerData { - bool operator==(v8::Local<v8::Function> other_function) const { - // Note that we only consider the listener function here, and not the - // filter. This implies that it's invalid to try and add the same - // function for multiple filters. - // TODO(devlin): It's always been this way, but should it be? 
- return function == other_function; - } - - v8::Global<v8::Function> function; - int filter_id; -}; - -FilteredEventListeners::FilteredEventListeners( - const ListenersUpdated& listeners_updated, - const std::string& event_name, - EventFilter* event_filter) - : listeners_updated_(listeners_updated), - event_name_(event_name), - event_filter_(event_filter) {} -FilteredEventListeners::~FilteredEventListeners() = default; - -bool FilteredEventListeners::AddListener(v8::Local<v8::Function> listener, - v8::Local<v8::Object> filter, - v8::Local<v8::Context> context, - std::string* error) { - if (HasListener(listener)) - return false; - - std::unique_ptr<base::DictionaryValue> filter_dict; - if (!ValidateFilter(context, filter, &filter_dict, error)) - return false; - - int filter_id = event_filter_->AddEventMatcher( - event_name_, - base::MakeUnique<EventMatcher>(std::move(filter_dict), kIgnoreRoutingId)); - - if (filter_id == -1) { - *error = "Could not add listener"; - return false; - } - - const EventMatcher* matcher = event_filter_->GetEventMatcher(filter_id); - DCHECK(matcher); - listeners_.push_back( - {v8::Global<v8::Function>(context->GetIsolate(), listener), filter_id}); - if (value_counter_.Add(*matcher->value())) { - listeners_updated_.Run(binding::EventListenersChanged::HAS_LISTENERS, - matcher->value(), context); - } - - return true; -} - -void FilteredEventListeners::RemoveListener(v8::Local<v8::Function> listener, - v8::Local<v8::Context> context) { - auto iter = std::find(listeners_.begin(), listeners_.end(), listener); - if (iter == listeners_.end()) - return; - - ListenerData data = std::move(*iter); - listeners_.erase(iter); - - InvalidateListener(data, context); -} - -bool FilteredEventListeners::HasListener(v8::Local<v8::Function> listener) { - return std::find(listeners_.begin(), listeners_.end(), listener) != - listeners_.end(); -} - -size_t FilteredEventListeners::GetNumListeners() { - return listeners_.size(); -} - 
-std::vector<v8::Local<v8::Function>> FilteredEventListeners::GetListeners( - const EventFilteringInfo* filter, - v8::Local<v8::Context> context) { - std::set<int> ids = event_filter_->MatchEvent( - event_name_, filter ? *filter : EventFilteringInfo(), kIgnoreRoutingId); - - std::vector<v8::Local<v8::Function>> listeners; - listeners.reserve(ids.size()); - for (const auto& listener : listeners_) { - if (ids.count(listener.filter_id)) - listeners.push_back(listener.function.Get(context->GetIsolate())); - } - return listeners; -} - -void FilteredEventListeners::Invalidate(v8::Local<v8::Context> context) { - for (const auto& listener : listeners_) - InvalidateListener(listener, context); - listeners_.clear(); -} - -void FilteredEventListeners::InvalidateListener( - const ListenerData& listener, - v8::Local<v8::Context> context) { - EventMatcher* matcher = event_filter_->GetEventMatcher(listener.filter_id); - DCHECK(matcher); - if (value_counter_.Remove(*matcher->value())) { - listeners_updated_.Run(binding::EventListenersChanged::NO_LISTENERS, - matcher->value(), context); - } - - event_filter_->RemoveEventMatcher(listener.filter_id); -} - -} // namespace extensions
diff --git a/extensions/renderer/api_event_listeners.h b/extensions/renderer/api_event_listeners.h deleted file mode 100644 index 307730c..0000000 --- a/extensions/renderer/api_event_listeners.h +++ /dev/null
@@ -1,155 +0,0 @@ -// Copyright 2017 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef EXTENSIONS_RENDERER_API_EVENT_LISTENERS_H_ -#define EXTENSIONS_RENDERER_API_EVENT_LISTENERS_H_ - -#include <string> -#include <vector> - -#include "base/callback.h" -#include "base/macros.h" -#include "extensions/common/value_counter.h" -#include "extensions/renderer/api_binding_types.h" -#include "v8/include/v8.h" - -namespace base { -class DictionaryValue; -} - -namespace extensions { -class EventFilter; -class EventFilteringInfo; - -// A base class to hold listeners for a given event. This allows for adding, -// removing, and querying listeners in the list, and calling a callback when -// transitioning from 0 -> 1 or 1 -> 0 listeners. -class APIEventListeners { - public: - using ListenersUpdated = - base::Callback<void(binding::EventListenersChanged, - const base::DictionaryValue* filter, - v8::Local<v8::Context> context)>; - - virtual ~APIEventListeners() = default; - - // Adds the given |listener| to the list, possibly associating it with the - // given |filter|. Returns true if the listener is added. Populates |error| - // with any errors encountered. Note that |error| is *not* always populated - // if false is returned, since we don't consider trying to re-add a listener - // to be an error. - virtual bool AddListener(v8::Local<v8::Function> listener, - v8::Local<v8::Object> filter, - v8::Local<v8::Context> context, - std::string* error) = 0; - - // Removes the given |listener|, if it's present in the list. - virtual void RemoveListener(v8::Local<v8::Function> listener, - v8::Local<v8::Context> context) = 0; - - // Returns true if the given |listener| is in the list. - virtual bool HasListener(v8::Local<v8::Function> listener) = 0; - - // Returns the number of listeners in the list. 
- virtual size_t GetNumListeners() = 0; - - // Returns the listeners that should be notified for the given |filter|. - virtual std::vector<v8::Local<v8::Function>> GetListeners( - const EventFilteringInfo* filter, - v8::Local<v8::Context> context) = 0; - - // Invalidates the list. - virtual void Invalidate(v8::Local<v8::Context> context) = 0; - - protected: - APIEventListeners() {} - - private: - DISALLOW_COPY_AND_ASSIGN(APIEventListeners); -}; - -// A listener list implementation that doesn't support filtering. Each event -// dispatched is dispatched to all the associated listeners. -class UnfilteredEventListeners final : public APIEventListeners { - public: - UnfilteredEventListeners(const ListenersUpdated& listeners_updated); - ~UnfilteredEventListeners() override; - - bool AddListener(v8::Local<v8::Function> listener, - v8::Local<v8::Object> filter, - v8::Local<v8::Context> context, - std::string* error) override; - void RemoveListener(v8::Local<v8::Function> listener, - v8::Local<v8::Context> context) override; - bool HasListener(v8::Local<v8::Function> listener) override; - size_t GetNumListeners() override; - std::vector<v8::Local<v8::Function>> GetListeners( - const EventFilteringInfo* filter, - v8::Local<v8::Context> context) override; - void Invalidate(v8::Local<v8::Context> context) override; - - private: - // The event listeners associated with this event. - // TODO(devlin): Having these listeners held as v8::Globals means that we - // need to worry about cycles when a listener holds a reference to the event, - // e.g. EventEmitter -> Listener -> EventEmitter. Right now, we handle that by - // requiring Invalidate() to be called, but that means that events that aren't - // Invalidate()'d earlier can leak until context destruction. We could - // circumvent this by storing the listeners strongly in a private propery - // (thus traceable by v8), and optionally keep a weak cache on this object. 
- std::vector<v8::Global<v8::Function>> listeners_; - - ListenersUpdated listeners_updated_; - - DISALLOW_COPY_AND_ASSIGN(UnfilteredEventListeners); -}; - -// A listener list implementation that supports filtering. Events should only -// be dispatched to those listeners whose filters match. Additionally, the -// updated callback is triggered any time a listener with a new filter is -// added, or the last listener with a given filter is removed. -class FilteredEventListeners final : public APIEventListeners { - public: - FilteredEventListeners(const ListenersUpdated& listeners_updated, - const std::string& event_name, - EventFilter* event_filter); - ~FilteredEventListeners() override; - - bool AddListener(v8::Local<v8::Function> listener, - v8::Local<v8::Object> filter, - v8::Local<v8::Context> context, - std::string* error) override; - void RemoveListener(v8::Local<v8::Function> listener, - v8::Local<v8::Context> context) override; - bool HasListener(v8::Local<v8::Function> listener) override; - size_t GetNumListeners() override; - std::vector<v8::Local<v8::Function>> GetListeners( - const EventFilteringInfo* filter, - v8::Local<v8::Context> context) override; - void Invalidate(v8::Local<v8::Context> context) override; - - private: - struct ListenerData; - - void InvalidateListener(const ListenerData& listener, - v8::Local<v8::Context> context); - - // Note: See TODO on UnfilteredEventListeners::listeners_. - std::vector<ListenerData> listeners_; - - ListenersUpdated listeners_updated_; - - std::string event_name_; - - // The associated EventFilter; required to outlive this object. - EventFilter* event_filter_ = nullptr; - - ValueCounter value_counter_; - - DISALLOW_COPY_AND_ASSIGN(FilteredEventListeners); -}; - -} // namespace extensions - -#endif // EXTENSIONS_RENDERER_API_EVENT_LISTENERS_H_
diff --git a/extensions/renderer/api_event_listeners_unittest.cc b/extensions/renderer/api_event_listeners_unittest.cc deleted file mode 100644 index 6eeb36f..0000000 --- a/extensions/renderer/api_event_listeners_unittest.cc +++ /dev/null
@@ -1,427 +0,0 @@ -// Copyright 2017 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "extensions/renderer/api_event_listeners.h" - -#include "base/test/mock_callback.h" -#include "base/values.h" -#include "extensions/common/event_filter.h" -#include "extensions/renderer/api_binding_test.h" -#include "extensions/renderer/api_binding_test_util.h" -#include "testing/gmock/include/gmock/gmock.h" - -namespace extensions { - -namespace { - -using APIEventListenersTest = APIBindingTest; -using MockEventChangeHandler = ::testing::StrictMock< - base::MockCallback<APIEventListeners::ListenersUpdated>>; - -void DoNothingOnUpdate(binding::EventListenersChanged changed, - const base::DictionaryValue* filter, - v8::Local<v8::Context> context) {} - -const char kFunction[] = "(function() {})"; -const char kEvent[] = "event"; - -} // namespace - -// Test unfiltered listeners. -TEST_F(APIEventListenersTest, UnfilteredListeners) { - v8::HandleScope handle_scope(isolate()); - v8::Local<v8::Context> context = MainContext(); - - MockEventChangeHandler handler; - UnfilteredEventListeners listeners(handler.Get()); - - // Starting out, there should be no listeners. - v8::Local<v8::Function> function_a = FunctionFromString(context, kFunction); - EXPECT_EQ(0u, listeners.GetNumListeners()); - EXPECT_FALSE(listeners.HasListener(function_a)); - - std::string error; - v8::Local<v8::Object> filter; - - // Adding a new listener should trigger the callback (0 -> 1). - EXPECT_CALL(handler, Run(binding::EventListenersChanged::HAS_LISTENERS, - nullptr, context)); - EXPECT_TRUE(listeners.AddListener(function_a, filter, context, &error)); - ::testing::Mock::VerifyAndClearExpectations(&handler); - - // function_a should be registered as a listener, and should be returned when - // we get the listeners. 
- EXPECT_TRUE(listeners.HasListener(function_a)); - EXPECT_EQ(1u, listeners.GetNumListeners()); - EXPECT_THAT(listeners.GetListeners(nullptr, context), - testing::UnorderedElementsAre(function_a)); - - // Trying to add function_a again should have no effect. - EXPECT_FALSE(listeners.AddListener(function_a, filter, context, &error)); - EXPECT_TRUE(listeners.HasListener(function_a)); - EXPECT_EQ(1u, listeners.GetNumListeners()); - - v8::Local<v8::Function> function_b = FunctionFromString(context, kFunction); - - // We should not yet have function_b registered, and trying to remove it - // should have no effect. - EXPECT_FALSE(listeners.HasListener(function_b)); - listeners.RemoveListener(function_b, context); - EXPECT_EQ(1u, listeners.GetNumListeners()); - EXPECT_THAT(listeners.GetListeners(nullptr, context), - testing::UnorderedElementsAre(function_a)); - - // Add function_b; there should now be two listeners, and both should be - // returned when we get the listeners. However, the callback shouldn't be - // triggered, since this isn't a 0 -> 1 or 1 -> 0 transition. - EXPECT_TRUE(listeners.AddListener(function_b, filter, context, &error)); - EXPECT_TRUE(listeners.HasListener(function_b)); - EXPECT_EQ(2u, listeners.GetNumListeners()); - EXPECT_THAT(listeners.GetListeners(nullptr, context), - testing::UnorderedElementsAre(function_a, function_b)); - - // Remove function_a; there should now be only one listener. The callback - // shouldn't be triggered. - listeners.RemoveListener(function_a, context); - EXPECT_FALSE(listeners.HasListener(function_a)); - EXPECT_EQ(1u, listeners.GetNumListeners()); - EXPECT_THAT(listeners.GetListeners(nullptr, context), - testing::UnorderedElementsAre(function_b)); - - // Remove function_b (the final listener). No more listeners should remain. 
- EXPECT_CALL(handler, Run(binding::EventListenersChanged::NO_LISTENERS, - nullptr, context)); - listeners.RemoveListener(function_b, context); - ::testing::Mock::VerifyAndClearExpectations(&handler); - EXPECT_FALSE(listeners.HasListener(function_b)); - EXPECT_EQ(0u, listeners.GetNumListeners()); - EXPECT_TRUE(listeners.GetListeners(nullptr, context).empty()); -} - -// Tests the invalidation of unfiltered listeners. -TEST_F(APIEventListenersTest, UnfilteredListenersInvalidation) { - v8::HandleScope handle_scope(isolate()); - v8::Local<v8::Context> context = MainContext(); - - MockEventChangeHandler handler; - UnfilteredEventListeners listeners(handler.Get()); - - listeners.Invalidate(context); - - v8::Local<v8::Function> function_a = FunctionFromString(context, kFunction); - v8::Local<v8::Function> function_b = FunctionFromString(context, kFunction); - std::string error; - v8::Local<v8::Object> filter; - EXPECT_CALL(handler, Run(binding::EventListenersChanged::HAS_LISTENERS, - nullptr, context)); - EXPECT_TRUE(listeners.AddListener(function_a, filter, context, &error)); - ::testing::Mock::VerifyAndClearExpectations(&handler); - EXPECT_TRUE(listeners.AddListener(function_b, filter, context, &error)); - - EXPECT_CALL(handler, Run(binding::EventListenersChanged::NO_LISTENERS, - nullptr, context)); - listeners.Invalidate(context); - ::testing::Mock::VerifyAndClearExpectations(&handler); - - EXPECT_EQ(0u, listeners.GetNumListeners()); -} - -// Tests that unfiltered listeners ignore the filtering info. 
-TEST_F(APIEventListenersTest, UnfilteredListenersIgnoreFilteringInfo) { - v8::HandleScope handle_scope(isolate()); - v8::Local<v8::Context> context = MainContext(); - - UnfilteredEventListeners listeners(base::Bind(&DoNothingOnUpdate)); - v8::Local<v8::Function> function = FunctionFromString(context, kFunction); - std::string error; - v8::Local<v8::Object> filter; - EXPECT_TRUE(listeners.AddListener(function, filter, context, &error)); - std::unique_ptr<base::DictionaryValue> filtering_info_dict = - DictionaryValueFromString("{'url': 'http://example.com/foo'}"); - EventFilteringInfo filtering_info(*filtering_info_dict); - EXPECT_THAT(listeners.GetListeners(&filtering_info, context), - testing::UnorderedElementsAre(function)); -} - -// Tests filtered listeners. -TEST_F(APIEventListenersTest, FilteredListeners) { - v8::HandleScope handle_scope(isolate()); - v8::Local<v8::Context> context = MainContext(); - - MockEventChangeHandler handler; - EventFilter event_filter; - FilteredEventListeners listeners(handler.Get(), kEvent, &event_filter); - - // Starting out, there should be no listeners registered. - v8::Local<v8::Function> function_a = FunctionFromString(context, kFunction); - EXPECT_EQ(0u, listeners.GetNumListeners()); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kEvent)); - EXPECT_FALSE(listeners.HasListener(function_a)); - - v8::Local<v8::Object> empty_filter; - std::string error; - - // Register function_a with no filter; this is equivalent to registering for - // all events. The callback should be triggered since this is a 0 -> 1 - // transition. - // Note that we don't test the passed filter here. This is mostly because it's - // a pain to match against a DictionaryValue (which doesn't have an - // operator==). 
- EXPECT_CALL(handler, Run(binding::EventListenersChanged::HAS_LISTENERS, - testing::NotNull(), context)); - EXPECT_TRUE(listeners.AddListener(function_a, empty_filter, context, &error)); - ::testing::Mock::VerifyAndClearExpectations(&handler); - - // function_a should be registered, and should be returned when we get the - // listeners. - EXPECT_TRUE(listeners.HasListener(function_a)); - EXPECT_EQ(1u, listeners.GetNumListeners()); - EXPECT_THAT(listeners.GetListeners(nullptr, context), - testing::UnorderedElementsAre(function_a)); - - // It should also be registered in the event filter. - EXPECT_EQ(1, event_filter.GetMatcherCountForEventForTesting(kEvent)); - - // Since function_a has no filter, associating a specific url should still - // return function_a. - std::unique_ptr<base::DictionaryValue> filtering_info_match_dict = - DictionaryValueFromString("{'url': 'http://example.com/foo'}"); - ASSERT_TRUE(filtering_info_match_dict); - EventFilteringInfo filtering_info_match(*filtering_info_match_dict); - EXPECT_THAT(listeners.GetListeners(&filtering_info_match, context), - testing::UnorderedElementsAre(function_a)); - - // Trying to add function_a again should have no effect. - EXPECT_FALSE( - listeners.AddListener(function_a, empty_filter, context, &error)); - EXPECT_TRUE(listeners.HasListener(function_a)); - EXPECT_EQ(1u, listeners.GetNumListeners()); - - v8::Local<v8::Function> function_b = FunctionFromString(context, kFunction); - - // function_b should not yet be registered, and trying to remove it should - // have no effect. - EXPECT_FALSE(listeners.HasListener(function_b)); - listeners.RemoveListener(function_b, context); - EXPECT_EQ(1u, listeners.GetNumListeners()); - EXPECT_THAT(listeners.GetListeners(nullptr, context), - testing::UnorderedElementsAre(function_a)); - - // Register function_b with a filter for pathContains: 'foo'. 
Unlike - // unfiltered listeners, this *should* trigger the callback, since there is - // no other listener registered with this same filter. - v8::Local<v8::Object> path_filter; - { - v8::Local<v8::Value> val = - V8ValueFromScriptSource(context, "({url: [{pathContains: 'foo'}]})"); - ASSERT_TRUE(val->IsObject()); - path_filter = val.As<v8::Object>(); - } - EXPECT_CALL(handler, Run(binding::EventListenersChanged::HAS_LISTENERS, - testing::NotNull(), context)); - EXPECT_TRUE(listeners.AddListener(function_b, path_filter, context, &error)); - ::testing::Mock::VerifyAndClearExpectations(&handler); - - // function_b should be present. - EXPECT_TRUE(listeners.HasListener(function_b)); - EXPECT_EQ(2u, listeners.GetNumListeners()); - EXPECT_EQ(2, event_filter.GetMatcherCountForEventForTesting(kEvent)); - - // function_b should ignore calls that don't specify an url, since they, by - // definition, don't match. - EXPECT_THAT(listeners.GetListeners(nullptr, context), - testing::UnorderedElementsAre(function_a)); - // function_b should be included for matching urls... - EXPECT_THAT(listeners.GetListeners(&filtering_info_match, context), - testing::UnorderedElementsAre(function_a, function_b)); - // ... but not urls that don't match. - std::unique_ptr<base::DictionaryValue> filtering_info_no_match_dict = - DictionaryValueFromString("{'url': 'http://example.com/bar'}"); - ASSERT_TRUE(filtering_info_no_match_dict); - EventFilteringInfo filtering_info_no_match(*filtering_info_no_match_dict); - EXPECT_THAT(listeners.GetListeners(&filtering_info_no_match, context), - testing::UnorderedElementsAre(function_a)); - - // Remove function_a. Since filtered listeners notify whenever there's a - // change in listeners registered with a specific filter, this should trigger - // the callback. 
- EXPECT_CALL(handler, Run(binding::EventListenersChanged::NO_LISTENERS, - testing::NotNull(), context)); - listeners.RemoveListener(function_a, context); - ::testing::Mock::VerifyAndClearExpectations(&handler); - EXPECT_FALSE(listeners.HasListener(function_a)); - EXPECT_EQ(1u, listeners.GetNumListeners()); - EXPECT_EQ(1, event_filter.GetMatcherCountForEventForTesting(kEvent)); - // function_b should be the only listener remaining, so we shouldn't find - // any listeners for events without matching filters. - EXPECT_TRUE(listeners.GetListeners(nullptr, context).empty()); - EXPECT_THAT(listeners.GetListeners(&filtering_info_match, context), - testing::UnorderedElementsAre(function_b)); - EXPECT_TRUE( - listeners.GetListeners(&filtering_info_no_match, context).empty()); - - // Remove function_b. No listeners should remain. - EXPECT_CALL(handler, Run(binding::EventListenersChanged::NO_LISTENERS, - testing::NotNull(), context)); - listeners.RemoveListener(function_b, context); - ::testing::Mock::VerifyAndClearExpectations(&handler); - EXPECT_FALSE(listeners.HasListener(function_b)); - EXPECT_EQ(0u, listeners.GetNumListeners()); - EXPECT_TRUE(listeners.GetListeners(nullptr, context).empty()); - EXPECT_TRUE(listeners.GetListeners(&filtering_info_match, context).empty()); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kEvent)); -} - -// Tests that adding multiple listeners with the same filter doesn't trigger -// the update callback. 
-TEST_F(APIEventListenersTest, - UnfilteredListenersWithSameFilterDontTriggerUpdate) { - v8::HandleScope handle_scope(isolate()); - v8::Local<v8::Context> context = MainContext(); - - MockEventChangeHandler handler; - EventFilter event_filter; - FilteredEventListeners listeners(handler.Get(), kEvent, &event_filter); - - auto get_filter = [context]() { - return V8ValueFromScriptSource(context, "({url: [{pathContains: 'foo'}]})") - .As<v8::Object>(); - }; - - v8::Local<v8::Function> function_a = FunctionFromString(context, kFunction); - - std::string error; - EXPECT_CALL(handler, Run(binding::EventListenersChanged::HAS_LISTENERS, - testing::NotNull(), context)); - EXPECT_TRUE(listeners.AddListener(function_a, get_filter(), context, &error)); - ::testing::Mock::VerifyAndClearExpectations(&handler); - EXPECT_EQ(1, event_filter.GetMatcherCountForEventForTesting(kEvent)); - - v8::Local<v8::Function> function_b = FunctionFromString(context, kFunction); - v8::Local<v8::Function> function_c = FunctionFromString(context, kFunction); - EXPECT_TRUE(listeners.AddListener(function_b, get_filter(), context, &error)); - EXPECT_TRUE(listeners.AddListener(function_c, get_filter(), context, &error)); - EXPECT_EQ(3u, listeners.GetNumListeners()); - EXPECT_EQ(3, event_filter.GetMatcherCountForEventForTesting(kEvent)); - - std::unique_ptr<base::DictionaryValue> filtering_info_match_dict = - DictionaryValueFromString("{'url': 'http://example.com/foo'}"); - ASSERT_TRUE(filtering_info_match_dict); - EventFilteringInfo filtering_info_match(*filtering_info_match_dict); - EXPECT_THAT( - listeners.GetListeners(&filtering_info_match, context), - testing::UnorderedElementsAre(function_a, function_b, function_c)); - - listeners.RemoveListener(function_c, context); - listeners.RemoveListener(function_b, context); - - EXPECT_CALL(handler, Run(binding::EventListenersChanged::NO_LISTENERS, - testing::NotNull(), context)); - listeners.RemoveListener(function_a, context); - 
::testing::Mock::VerifyAndClearExpectations(&handler); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kEvent)); -} - -// Tests that trying to add a listener with an invalid filter fails. -TEST_F(APIEventListenersTest, UnfilteredListenersError) { - v8::HandleScope handle_scope(isolate()); - v8::Local<v8::Context> context = MainContext(); - - EventFilter event_filter; - FilteredEventListeners listeners(base::Bind(&DoNothingOnUpdate), kEvent, - &event_filter); - - v8::Local<v8::Object> invalid_filter = - V8ValueFromScriptSource(context, "({url: 'some string'})") - .As<v8::Object>(); - v8::Local<v8::Function> function = FunctionFromString(context, kFunction); - std::string error; - EXPECT_FALSE( - listeners.AddListener(function, invalid_filter, context, &error)); - EXPECT_FALSE(error.empty()); -} - -// Tests that adding listeners for multiple different events is correctly -// recorded in the EventFilter. -TEST_F(APIEventListenersTest, MultipleUnfilteredListenerEvents) { - v8::HandleScope handle_scope(isolate()); - v8::Local<v8::Context> context = MainContext(); - - const char kAlpha[] = "alpha"; - const char kBeta[] = "beta"; - - EventFilter event_filter; - FilteredEventListeners listeners_a(base::Bind(&DoNothingOnUpdate), kAlpha, - &event_filter); - FilteredEventListeners listeners_b(base::Bind(&DoNothingOnUpdate), kBeta, - &event_filter); - - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kAlpha)); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kBeta)); - - std::string error; - v8::Local<v8::Object> filter; - - v8::Local<v8::Function> function_a = FunctionFromString(context, kFunction); - EXPECT_TRUE(listeners_a.AddListener(function_a, filter, context, &error)); - EXPECT_EQ(1, event_filter.GetMatcherCountForEventForTesting(kAlpha)); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kBeta)); - - v8::Local<v8::Function> function_b = FunctionFromString(context, kFunction); - 
EXPECT_TRUE(listeners_b.AddListener(function_b, filter, context, &error)); - EXPECT_EQ(1, event_filter.GetMatcherCountForEventForTesting(kAlpha)); - EXPECT_EQ(1, event_filter.GetMatcherCountForEventForTesting(kBeta)); - - listeners_b.RemoveListener(function_b, context); - EXPECT_EQ(1, event_filter.GetMatcherCountForEventForTesting(kAlpha)); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kBeta)); - - listeners_a.RemoveListener(function_a, context); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kAlpha)); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kBeta)); -} - -// Tests the invalidation of filtered listeners. -TEST_F(APIEventListenersTest, FilteredListenersInvalidation) { - v8::HandleScope handle_scope(isolate()); - v8::Local<v8::Context> context = MainContext(); - - MockEventChangeHandler handler; - EventFilter event_filter; - FilteredEventListeners listeners(handler.Get(), kEvent, &event_filter); - listeners.Invalidate(context); - - v8::Local<v8::Object> empty_filter; - v8::Local<v8::Object> filter = - V8ValueFromScriptSource(context, "({url: [{pathContains: 'foo'}]})") - .As<v8::Object>(); - std::string error; - - v8::Local<v8::Function> function_a = FunctionFromString(context, kFunction); - v8::Local<v8::Function> function_b = FunctionFromString(context, kFunction); - v8::Local<v8::Function> function_c = FunctionFromString(context, kFunction); - - EXPECT_CALL(handler, Run(binding::EventListenersChanged::HAS_LISTENERS, - testing::NotNull(), context)); - EXPECT_TRUE(listeners.AddListener(function_a, empty_filter, context, &error)); - ::testing::Mock::VerifyAndClearExpectations(&handler); - EXPECT_CALL(handler, Run(binding::EventListenersChanged::HAS_LISTENERS, - testing::NotNull(), context)); - EXPECT_TRUE(listeners.AddListener(function_b, filter, context, &error)); - ::testing::Mock::VerifyAndClearExpectations(&handler); - EXPECT_TRUE(listeners.AddListener(function_c, filter, context, &error)); - - // Since two 
listener filters are present in the list, we should be notified - // of each going away when we invalidate the context. - EXPECT_CALL(handler, Run(binding::EventListenersChanged::NO_LISTENERS, - testing::NotNull(), context)) - .Times(2); - listeners.Invalidate(context); - ::testing::Mock::VerifyAndClearExpectations(&handler); - - EXPECT_EQ(0u, listeners.GetNumListeners()); - EXPECT_EQ(0, event_filter.GetMatcherCountForEventForTesting(kEvent)); -} - -} // namespace extensions
diff --git a/extensions/renderer/chrome_setting.cc b/extensions/renderer/chrome_setting.cc index b8830429..13f5c204 100644 --- a/extensions/renderer/chrome_setting.cc +++ b/extensions/renderer/chrome_setting.cc
@@ -143,11 +143,10 @@ DCHECK(!event.IsEmpty()); if (event->IsUndefined()) { - bool supports_filters = false; event = event_handler_->CreateEventInstance( base::StringPrintf("types.ChromeSetting.%s.onChange", pref_name_.c_str()), - supports_filters, context); + context); v8::Maybe<bool> set_result = wrapper->SetPrivate(context, key, event); if (!set_result.IsJust() || !set_result.FromJust()) { NOTREACHED();
diff --git a/extensions/renderer/dispatcher.cc b/extensions/renderer/dispatcher.cc index 72df5e0..4dcf038 100644 --- a/extensions/renderer/dispatcher.cc +++ b/extensions/renderer/dispatcher.cc
@@ -206,34 +206,14 @@ // background pages. void SendEventListenersIPC(binding::EventListenersChanged changed, ScriptContext* context, - const std::string& event_name, - const base::DictionaryValue* filter) { - // TODO(devlin): Fix this. We need to account for lazy listeners, but it - // also depends on if the listener is removed due to the context being torn - // down or the extension unregistering. - bool lazy = false; - std::string extension_id = context->GetExtensionID(); - - if (filter) { - if (changed == binding::EventListenersChanged::HAS_LISTENERS) { - content::RenderThread::Get()->Send( - new ExtensionHostMsg_AddFilteredListener(extension_id, event_name, - *filter, lazy)); - } else { - DCHECK_EQ(binding::EventListenersChanged::NO_LISTENERS, changed); - content::RenderThread::Get()->Send( - new ExtensionHostMsg_RemoveFilteredListener(extension_id, event_name, - *filter, lazy)); - } + const std::string& event_name) { + if (changed == binding::EventListenersChanged::HAS_LISTENERS) { + content::RenderThread::Get()->Send(new ExtensionHostMsg_AddListener( + context->GetExtensionID(), context->url(), event_name)); } else { - if (changed == binding::EventListenersChanged::HAS_LISTENERS) { - content::RenderThread::Get()->Send(new ExtensionHostMsg_AddListener( - context->GetExtensionID(), context->url(), event_name)); - } else { - DCHECK_EQ(binding::EventListenersChanged::NO_LISTENERS, changed); - content::RenderThread::Get()->Send(new ExtensionHostMsg_RemoveListener( - context->GetExtensionID(), context->url(), event_name)); - } + DCHECK_EQ(binding::EventListenersChanged::NO_LISTENERS, changed); + content::RenderThread::Get()->Send(new ExtensionHostMsg_RemoveListener( + context->GetExtensionID(), context->url(), event_name)); } }
diff --git a/extensions/renderer/event_emitter.cc b/extensions/renderer/event_emitter.cc index e6cd977..82523e96 100644 --- a/extensions/renderer/event_emitter.cc +++ b/extensions/renderer/event_emitter.cc
@@ -6,7 +6,6 @@ #include <algorithm> -#include "extensions/renderer/api_event_listeners.h" #include "gin/object_template_builder.h" #include "gin/per_context_data.h" @@ -14,12 +13,9 @@ gin::WrapperInfo EventEmitter::kWrapperInfo = {gin::kEmbedderNativeGin}; -EventEmitter::EventEmitter(bool supports_filters, - std::unique_ptr<APIEventListeners> listeners, - const binding::RunJSFunction& run_js) - : supports_filters_(supports_filters), - listeners_(std::move(listeners)), - run_js_(run_js) {} +EventEmitter::EventEmitter(const binding::RunJSFunction& run_js, + const ListenersChangedMethod& listeners_changed) + : run_js_(run_js), listeners_changed_(listeners_changed) {} EventEmitter::~EventEmitter() {} @@ -38,11 +34,13 @@ } void EventEmitter::Fire(v8::Local<v8::Context> context, - std::vector<v8::Local<v8::Value>>* args, - const EventFilteringInfo* filter) { - // Note that |listeners_| can be modified during handling. - std::vector<v8::Local<v8::Function>> listeners = - listeners_->GetListeners(filter, context); + std::vector<v8::Local<v8::Value>>* args) { + // We create a local copy of listeners_ since the array can be modified during + // handling. 
+ std::vector<v8::Local<v8::Function>> listeners; + listeners.reserve(listeners_.size()); + for (const auto& listener : listeners_) + listeners.push_back(listener.Get(context->GetIsolate())); for (const auto& listener : listeners) { v8::TryCatch try_catch(context->GetIsolate()); @@ -54,13 +52,9 @@ } } -void EventEmitter::Invalidate(v8::Local<v8::Context> context) { +void EventEmitter::Invalidate() { valid_ = false; - listeners_->Invalidate(context); -} - -size_t EventEmitter::GetNumListeners() const { - return listeners_->GetNumListeners(); + listeners_.clear(); } void EventEmitter::AddListener(gin::Arguments* arguments) { @@ -78,17 +72,6 @@ if (!arguments->GetNext(&listener)) return; - if (!arguments->PeekNext().IsEmpty() && !supports_filters_) { - arguments->ThrowTypeError("This event does not support filters"); - return; - } - - v8::Local<v8::Object> filter; - if (!arguments->PeekNext().IsEmpty() && !arguments->GetNext(&filter)) { - arguments->ThrowTypeError("Invalid invocation"); - return; - } - v8::Local<v8::Object> holder; CHECK(arguments->GetHolder(&holder)); CHECK(!holder.IsEmpty()); @@ -96,10 +79,13 @@ if (!gin::PerContextData::From(context)) return; - std::string error; - if (!listeners_->AddListener(listener, filter, context, &error) && - !error.empty()) { - arguments->ThrowTypeError(error); + if (!HasListener(listener)) { + listeners_.push_back( + v8::Global<v8::Function>(arguments->isolate(), listener)); + if (listeners_.size() == 1) { + listeners_changed_.Run(binding::EventListenersChanged::HAS_LISTENERS, + context); + } } } @@ -113,26 +99,34 @@ if (!arguments->GetNext(&listener)) return; - v8::Local<v8::Object> holder; - CHECK(arguments->GetHolder(&holder)); - CHECK(!holder.IsEmpty()); - v8::Local<v8::Context> context = holder->CreationContext(); - listeners_->RemoveListener(listener, context); + auto iter = std::find(listeners_.begin(), listeners_.end(), listener); + if (iter != listeners_.end()) { + listeners_.erase(iter); + if (listeners_.empty()) 
{ + v8::Local<v8::Object> holder; + CHECK(arguments->GetHolder(&holder)); + CHECK(!holder.IsEmpty()); + v8::Local<v8::Context> context = holder->CreationContext(); + listeners_changed_.Run(binding::EventListenersChanged::NO_LISTENERS, + context); + } + } } bool EventEmitter::HasListener(v8::Local<v8::Function> listener) { - return listeners_->HasListener(listener); + return std::find(listeners_.begin(), listeners_.end(), listener) != + listeners_.end(); } bool EventEmitter::HasListeners() { - return listeners_->GetNumListeners() != 0; + return !listeners_.empty(); } void EventEmitter::Dispatch(gin::Arguments* arguments) { if (!valid_) return; - if (listeners_->GetNumListeners() == 0) + if (listeners_.empty()) return; v8::HandleScope handle_scope(arguments->isolate()); v8::Local<v8::Context> context = @@ -142,7 +136,7 @@ // Converting to v8::Values should never fail. CHECK(arguments->GetRemaining(&v8_args)); } - Fire(context, &v8_args, nullptr); + Fire(context, &v8_args); } } // namespace extensions
diff --git a/extensions/renderer/event_emitter.h b/extensions/renderer/event_emitter.h index e843323..3f5b95b7 100644 --- a/extensions/renderer/event_emitter.h +++ b/extensions/renderer/event_emitter.h
@@ -16,17 +16,19 @@ } namespace extensions { -class APIEventListeners; -class EventFilteringInfo; // A gin::Wrappable Event object. One is expected to be created per event, per // context. Note: this object *does not* clear any events, so it must be // destroyed with the context to avoid leaking. class EventEmitter final : public gin::Wrappable<EventEmitter> { public: - EventEmitter(bool supports_filters, - std::unique_ptr<APIEventListeners> listeners, - const binding::RunJSFunction& run_js); + using Listeners = std::vector<v8::Global<v8::Function>>; + using ListenersChangedMethod = + base::Callback<void(binding::EventListenersChanged, + v8::Local<v8::Context>)>; + + EventEmitter(const binding::RunJSFunction& run_js, + const ListenersChangedMethod& listeners_changed); ~EventEmitter() override; static gin::WrapperInfo kWrapperInfo; @@ -36,14 +38,13 @@ v8::Isolate* isolate) final; void Fire(v8::Local<v8::Context> context, - std::vector<v8::Local<v8::Value>>* args, - const EventFilteringInfo* filter); + std::vector<v8::Local<v8::Value>>* args); // Removes all listeners and marks this object as invalid so that no more // are added. - void Invalidate(v8::Local<v8::Context> context); + void Invalidate(); - size_t GetNumListeners() const; + const Listeners* listeners() const { return &listeners_; } private: // Bound methods for the Event JS object. @@ -57,13 +58,20 @@ // When invalid, no listeners can be added or removed. bool valid_ = true; - // Whether the event supports filters. - bool supports_filters_ = false; - - std::unique_ptr<APIEventListeners> listeners_; + // The event listeners associated with this event. + // TODO(devlin): Having these listeners held as v8::Globals means that we + // need to worry about cycles when a listener holds a reference to the event, + // e.g. EventEmitter -> Listener -> EventEmitter. 
Right now, we handle that by
+  // requiring Invalidate() to be called, but that means that events that aren't
+  // Invalidate()'d earlier can leak until context destruction. We could
+  // circumvent this by storing the listeners strongly in a private property
+  // (thus traceable by v8), and optionally keep a weak cache on this object.
+  Listeners listeners_;
 
   binding::RunJSFunction run_js_;
 
+  ListenersChangedMethod listeners_changed_;
+
   DISALLOW_COPY_AND_ASSIGN(EventEmitter);
 };
diff --git a/extensions/renderer/native_extension_bindings_system.cc b/extensions/renderer/native_extension_bindings_system.cc index c777890f..0e65f47 100644 --- a/extensions/renderer/native_extension_bindings_system.cc +++ b/extensions/renderer/native_extension_bindings_system.cc
@@ -8,7 +8,6 @@ #include "base/memory/ptr_util.h" #include "content/public/common/content_switches.h" #include "extensions/common/constants.h" -#include "extensions/common/event_filtering_info.h" #include "extensions/common/extension_api.h" #include "extensions/common/extension_messages.h" #include "extensions/common/features/feature_provider.h" @@ -447,15 +446,13 @@ void NativeExtensionBindingsSystem::DispatchEventInContext( const std::string& event_name, const base::ListValue* event_args, - const base::DictionaryValue* filtering_info_dict, + const base::DictionaryValue* filtering_info, ScriptContext* context) { v8::HandleScope handle_scope(context->isolate()); v8::Context::Scope context_scope(context->v8_context()); - EventFilteringInfo filter; - if (filtering_info_dict) - filter = EventFilteringInfo(*filtering_info_dict); - api_system_.FireEventInContext(event_name, context->v8_context(), *event_args, - filter); + // TODO(devlin): Take into account |filtering_info|. + api_system_.FireEventInContext(event_name, context->v8_context(), + *event_args); } void NativeExtensionBindingsSystem::HandleResponse( @@ -637,11 +634,9 @@ void NativeExtensionBindingsSystem::OnEventListenerChanged( const std::string& event_name, binding::EventListenersChanged change, - const base::DictionaryValue* filter, v8::Local<v8::Context> context) { - send_event_listener_ipc_.Run(change, - ScriptContextSet::GetContextByV8Context(context), - event_name, filter); + send_event_listener_ipc_.Run( + change, ScriptContextSet::GetContextByV8Context(context), event_name); } void NativeExtensionBindingsSystem::GetJSBindingUtil(
diff --git a/extensions/renderer/native_extension_bindings_system.h b/extensions/renderer/native_extension_bindings_system.h index 40d9e795..22dc4d4 100644 --- a/extensions/renderer/native_extension_bindings_system.h +++ b/extensions/renderer/native_extension_bindings_system.h
@@ -34,8 +34,7 @@ using SendEventListenerIPCMethod = base::Callback<void(binding::EventListenersChanged, ScriptContext*, - const std::string& event_name, - const base::DictionaryValue* filter)>; + const std::string& event_name)>; NativeExtensionBindingsSystem( const SendRequestIPCMethod& send_request_ipc, @@ -66,7 +65,6 @@ // to |send_event_listener_ipc_|. void OnEventListenerChanged(const std::string& event_name, binding::EventListenersChanged change, - const base::DictionaryValue* filter, v8::Local<v8::Context> context); // Getter callback for an extension API, since APIs are constructed lazily.
diff --git a/extensions/renderer/native_extension_bindings_system_unittest.cc b/extensions/renderer/native_extension_bindings_system_unittest.cc index 45af2f4..7e82ac2 100644 --- a/extensions/renderer/native_extension_bindings_system_unittest.cc +++ b/extensions/renderer/native_extension_bindings_system_unittest.cc
@@ -68,11 +68,10 @@ class EventChangeHandler { public: - MOCK_METHOD4(OnChange, + MOCK_METHOD3(OnChange, void(binding::EventListenersChanged, ScriptContext*, - const std::string& event_name, - const base::DictionaryValue* filter)); + const std::string& event_name)); }; } // namespace @@ -131,10 +130,9 @@ void MockSendListenerIPC(binding::EventListenersChanged changed, ScriptContext* context, - const std::string& event_name, - const base::DictionaryValue* filter) { + const std::string& event_name) { if (event_change_handler_) - event_change_handler_->OnChange(changed, context, event_name, filter); + event_change_handler_->OnChange(changed, context, event_name); } ScriptContext* CreateScriptContext(v8::Local<v8::Context> v8_context, @@ -569,10 +567,11 @@ "});"; v8::Local<v8::Function> add_listener = FunctionFromString(context, kAddListener); - EXPECT_CALL(*event_change_handler(), - OnChange(binding::EventListenersChanged::HAS_LISTENERS, - script_context, kEventName, nullptr)) - .Times(1); + EXPECT_CALL( + *event_change_handler(), + OnChange(binding::EventListenersChanged::HAS_LISTENERS, + script_context, + kEventName)).Times(1); v8::Local<v8::Value> argv[] = {listener}; RunFunction(add_listener, context, arraysize(argv), argv); ::testing::Mock::VerifyAndClearExpectations(event_change_handler()); @@ -582,10 +581,11 @@ "(function(listener) {\n" " chrome.idle.onStateChanged.removeListener(listener);\n" "});"; - EXPECT_CALL(*event_change_handler(), - OnChange(binding::EventListenersChanged::NO_LISTENERS, - script_context, kEventName, nullptr)) - .Times(1); + EXPECT_CALL( + *event_change_handler(), + OnChange(binding::EventListenersChanged::NO_LISTENERS, + script_context, + kEventName)).Times(1); v8::Local<v8::Function> remove_listener = FunctionFromString(context, kRemoveListener); RunFunction(remove_listener, context, arraysize(argv), argv); @@ -627,7 +627,7 @@ FunctionFromString(context, kUseAppRuntime); EXPECT_CALL(*event_change_handler(), 
OnChange(binding::EventListenersChanged::HAS_LISTENERS, - script_context, "app.runtime.onLaunched", nullptr)) + script_context, "app.runtime.onLaunched")) .Times(1); RunFunctionOnGlobal(use_app_runtime, context, 0, nullptr); ::testing::Mock::VerifyAndClearExpectations(event_change_handler());
diff --git a/extensions/renderer/resources/guest_view/guest_view_events.js b/extensions/renderer/resources/guest_view/guest_view_events.js index 1e4c3fe..33d89a26 100644 --- a/extensions/renderer/resources/guest_view/guest_view_events.js +++ b/extensions/renderer/resources/guest_view/guest_view_events.js
@@ -10,11 +10,9 @@ var EventBindings; var CreateEvent = function(name) { - if (bindingUtil) { - return bindingUtil.createCustomEvent(name, null, - true /* supportsFilters */); - } var eventOpts = {supportsListeners: true, supportsFilters: true}; + if (bindingUtil) + return bindingUtil.createCustomEvent(name, null, eventOpts); if (!EventBindings) EventBindings = require('event_bindings'); return new EventBindings.Event(name, undefined, eventOpts);
diff --git a/extensions/renderer/resources/messaging.js b/extensions/renderer/resources/messaging.js index 86c110b..37bbf72 100644 --- a/extensions/renderer/resources/messaging.js +++ b/extensions/renderer/resources/messaging.js
@@ -23,17 +23,11 @@ var kPortClosedError = 'Attempting to use a disconnected port object'; var jsEvent; - function createAnonymousEvent(schema) { + function createAnonymousEvent(schema, options) { if (bindingUtil) { // Native custom events ignore schema. - var supportsFilters = false; - return bindingUtil.createCustomEvent(undefined, undefined, - supportsFilters); + return bindingUtil.createCustomEvent(undefined, undefined, options); } - var options = { - __proto__: null, - unmanaged: true, - }; if (!jsEvent) jsEvent = require('event_bindings').Event; return new jsEvent(undefined, schema, options); @@ -67,8 +61,12 @@ type: 'any', optional: true, }; - this.onDisconnect = createAnonymousEvent([portSchema]); - this.onMessage = createAnonymousEvent([messageSchema, portSchema]); + var options = { + __proto__: null, + unmanaged: true, + }; + this.onDisconnect = createAnonymousEvent([portSchema], options); + this.onMessage = createAnonymousEvent([messageSchema, portSchema], options); } $Object.setPrototypeOf(PortImpl.prototype, null);
diff --git a/extensions/renderer/worker_thread_dispatcher.cc b/extensions/renderer/worker_thread_dispatcher.cc index 20921fa1..2e2b9c4 100644 --- a/extensions/renderer/worker_thread_dispatcher.cc +++ b/extensions/renderer/worker_thread_dispatcher.cc
@@ -56,8 +56,7 @@ void SendEventListenersIPC(binding::EventListenersChanged changed, ScriptContext* context, - const std::string& event_name, - const base::DictionaryValue* filter) { + const std::string& event_name) { // TODO(devlin/lazyboy): Wire this up once extension workers support events. }
diff --git a/ios/chrome/app/application_delegate/metrics_mediator.h b/ios/chrome/app/application_delegate/metrics_mediator.h index 8c77b8c..6fb6530a9 100644 --- a/ios/chrome/app/application_delegate/metrics_mediator.h +++ b/ios/chrome/app/application_delegate/metrics_mediator.h
@@ -28,8 +28,8 @@ - (BOOL)isUploadingEnabled; // Starts or stops the metrics service and crash report recording and/or // uploading, based on the current user preferences. Makes sure helper -// mechanisms and the wwan state observer are set up if necessary. Called both -// on initialization and after user triggered preference change. +// mechanisms and the wwan state observer are set up if necessary. Must be +// called both on initialization and after user triggered preference change. // |isUserTriggered| is used to distinguish between those cases. - (void)updateMetricsStateBasedOnPrefsUserTriggered:(BOOL)isUserTriggered; // Logs the duration of the cold start startup. Does nothing if there isn't a
diff --git a/ios/chrome/browser/reading_list/reading_list_download_service.cc b/ios/chrome/browser/reading_list/reading_list_download_service.cc index 59286fb..a79cca8a 100644 --- a/ios/chrome/browser/reading_list/reading_list_download_service.cc +++ b/ios/chrome/browser/reading_list/reading_list_download_service.cc
@@ -247,10 +247,8 @@ switch (real_success_value) { case URLDownloader::DOWNLOAD_SUCCESS: case URLDownloader::DOWNLOAD_EXISTS: { - int64_t now = - (base::Time::Now() - base::Time::UnixEpoch()).InMicroseconds(); - reading_list_model_->SetEntryDistilledInfo(url, distilled_path, - distilled_url, size, now); + reading_list_model_->SetEntryDistilledInfo( + url, distilled_path, distilled_url, size, base::Time::Now()); std::string trimmed_title = base::CollapseWhitespaceASCII(title, false); if (!trimmed_title.empty())
diff --git a/ios/chrome/browser/reading_list/reading_list_model_factory.cc b/ios/chrome/browser/reading_list/reading_list_model_factory.cc index 5d115aa..c225690 100644 --- a/ios/chrome/browser/reading_list/reading_list_model_factory.cc +++ b/ios/chrome/browser/reading_list/reading_list_model_factory.cc
@@ -9,6 +9,7 @@ #include "base/files/file_path.h" #include "base/memory/ptr_util.h" #include "base/memory/singleton.h" +#include "base/time/default_clock.h" #include "components/browser_sync/profile_sync_service.h" #include "components/keyed_service/ios/browser_state_dependency_manager.h" #include "components/pref_registry/pref_registry_syncable.h" @@ -77,8 +78,9 @@ GetChannel()))); std::unique_ptr<KeyedService> reading_list_model = - base::MakeUnique<ReadingListModelImpl>(std::move(store), - chrome_browser_state->GetPrefs()); + base::MakeUnique<ReadingListModelImpl>( + std::move(store), chrome_browser_state->GetPrefs(), + base::MakeUnique<base::DefaultClock>()); return reading_list_model; }
diff --git a/ios/chrome/browser/reading_list/reading_list_web_state_observer_unittest.mm b/ios/chrome/browser/reading_list/reading_list_web_state_observer_unittest.mm index 6c31e54..3700543 100644 --- a/ios/chrome/browser/reading_list/reading_list_web_state_observer_unittest.mm +++ b/ios/chrome/browser/reading_list/reading_list_web_state_observer_unittest.mm
@@ -5,6 +5,7 @@ #include "ios/chrome/browser/reading_list/reading_list_web_state_observer.h" #include "base/memory/ptr_util.h" +#include "base/time/default_clock.h" #include "components/reading_list/ios/reading_list_model_impl.h" #include "ios/chrome/browser/reading_list/offline_url_utils.h" #import "ios/web/public/navigation_item.h" @@ -58,8 +59,8 @@ test_navigation_manager->SetPendingItem(pending_item_.get()); test_navigation_manager->SetLastCommittedItem(last_committed_item_.get()); test_web_state_.SetNavigationManager(std::move(test_navigation_manager)); - reading_list_model_ = - base::MakeUnique<ReadingListModelImpl>(nullptr, nullptr); + reading_list_model_ = base::MakeUnique<ReadingListModelImpl>( + nullptr, nullptr, base::MakeUnique<base::DefaultClock>()); reading_list_model_->AddEntry(GURL(kTestURL), kTestTitle, reading_list::ADDED_VIA_CURRENT_APP); ReadingListWebStateObserver::FromWebState(&test_web_state_, @@ -97,7 +98,8 @@ GURL url(kTestURL); std::string distilled_path = kTestDistilledPath; reading_list_model_->SetEntryDistilledInfo( - url, base::FilePath(distilled_path), GURL(kTestDistilledURL), 50, 100); + url, base::FilePath(distilled_path), GURL(kTestDistilledURL), 50, + base::Time::FromTimeT(100)); const ReadingListEntry* entry = reading_list_model_->GetEntryByURL(url); test_navigation_manager_->GetPendingItem()->SetURL(url); @@ -119,7 +121,8 @@ GURL url(kTestURL); std::string distilled_path = kTestDistilledPath; reading_list_model_->SetEntryDistilledInfo( - url, base::FilePath(distilled_path), GURL(kTestDistilledURL), 50, 100); + url, base::FilePath(distilled_path), GURL(kTestDistilledURL), 50, + base::Time::FromTimeT(100)); const ReadingListEntry* entry = reading_list_model_->GetEntryByURL(url); GURL distilled_url = reading_list::OfflineURLForPath( entry->DistilledPath(), entry->URL(), entry->DistilledURL()); @@ -145,7 +148,8 @@ GURL url(kTestURL); std::string distilled_path = kTestDistilledPath; reading_list_model_->SetEntryDistilledInfo( - url, 
base::FilePath(distilled_path), GURL(kTestDistilledURL), 50, 100); + url, base::FilePath(distilled_path), GURL(kTestDistilledURL), 50, + base::Time::FromTimeT(100)); const ReadingListEntry* entry = reading_list_model_->GetEntryByURL(url); GURL distilled_url = reading_list::OfflineURLForPath( entry->DistilledPath(), entry->URL(), entry->DistilledURL());
diff --git a/ios/chrome/browser/ui/reading_list/reading_list_collection_view_controller_unittest.mm b/ios/chrome/browser/ui/reading_list/reading_list_collection_view_controller_unittest.mm index 487755f4..47e643f 100644 --- a/ios/chrome/browser/ui/reading_list/reading_list_collection_view_controller_unittest.mm +++ b/ios/chrome/browser/ui/reading_list/reading_list_collection_view_controller_unittest.mm
@@ -12,6 +12,7 @@ #include "base/single_thread_task_runner.h" #include "base/strings/sys_string_conversions.h" #include "base/threading/thread_task_runner_handle.h" +#include "base/time/default_clock.h" #include "components/favicon/core/large_icon_service.h" #include "components/favicon/core/test/mock_favicon_service.h" #include "components/reading_list/ios/reading_list_model.h" @@ -56,7 +57,8 @@ GetLargestRawFaviconForPageURL(_, _, _, _, _)) .WillRepeatedly(PostReply<5>(favicon_base::FaviconRawBitmapResult())); - reading_list_model_.reset(new ReadingListModelImpl(nullptr, nullptr)); + reading_list_model_.reset(new ReadingListModelImpl( + nullptr, nullptr, base::MakeUnique<base::DefaultClock>())); large_icon_service_.reset(new favicon::LargeIconService( &mock_favicon_service_, base::ThreadTaskRunnerHandle::Get())); reading_list_view_controller_.reset( @@ -190,7 +192,7 @@ reading_list::ADDED_VIA_CURRENT_APP); int64_t size = 50; reading_list_model_->SetEntryDistilledInfo(url, distilled_path, distilled_url, - size, 100); + size, base::Time::FromTimeT(100)); // Load view. [reading_list_view_controller_ view]; DCHECK([reading_list_view_controller_.get().collectionView
diff --git a/ios/chrome/browser/ui/reading_list/reading_list_coordinator_unittest.mm b/ios/chrome/browser/ui/reading_list/reading_list_coordinator_unittest.mm index 2178f30..2edf96e 100644 --- a/ios/chrome/browser/ui/reading_list/reading_list_coordinator_unittest.mm +++ b/ios/chrome/browser/ui/reading_list/reading_list_coordinator_unittest.mm
@@ -7,6 +7,7 @@ #include "base/mac/scoped_nsobject.h" #include "base/memory/ptr_util.h" #include "base/threading/thread_task_runner_handle.h" +#include "base/time/default_clock.h" #include "components/favicon/core/large_icon_service.h" #include "components/favicon/core/test/mock_favicon_service.h" #include "components/reading_list/ios/reading_list_entry.h" @@ -94,7 +95,8 @@ TestChromeBrowserState::Builder builder; browser_state_ = builder.Build(); - reading_list_model_.reset(new ReadingListModelImpl(nullptr, nullptr)); + reading_list_model_.reset(new ReadingListModelImpl( + nullptr, nullptr, base::MakeUnique<base::DefaultClock>())); large_icon_service_.reset(new favicon::LargeIconService( &mock_favicon_service_, base::ThreadTaskRunnerHandle::Get())); coordinator_.reset([[ReadingListCoordinator alloc] @@ -139,7 +141,7 @@ GURL url("https://chromium.org"); std::string title("Chromium"); std::unique_ptr<ReadingListEntry> entry = - base::MakeUnique<ReadingListEntry>(url, title); + base::MakeUnique<ReadingListEntry>(url, title, base::Time::FromTimeT(10)); ReadingListModel* model = GetReadingListModel(); model->AddEntry(url, title, reading_list::ADDED_VIA_CURRENT_APP);
diff --git a/ios/chrome/browser/ui/settings/settings_egtest.mm b/ios/chrome/browser/ui/settings/settings_egtest.mm index 87f28d3..8bcf82e 100644 --- a/ios/chrome/browser/ui/settings/settings_egtest.mm +++ b/ios/chrome/browser/ui/settings/settings_egtest.mm
@@ -605,11 +605,7 @@ metrics::prefs::kMetricsReportingEnabled, YES); chrome_test_util::SetBooleanLocalStatePref(prefs::kMetricsReportingWifiOnly, NO); - // Service should be always enabled regardless of network settings. - chrome_test_util::SetWWANStateTo(YES); [self assertMetricsServiceEnabled:serviceType]; - chrome_test_util::SetWWANStateTo(NO); - [self assertMetricsServiceDisabled:serviceType]; #else // Development build. Do not allow any recording or uploading of data. // Specifically, the kMetricsReportingEnabled preference is completely
diff --git a/media/gpu/video_decode_accelerator_unittest.cc b/media/gpu/video_decode_accelerator_unittest.cc index 16f528e..449d059 100644 --- a/media/gpu/video_decode_accelerator_unittest.cc +++ b/media/gpu/video_decode_accelerator_unittest.cc
@@ -143,6 +143,10 @@ // working directory. base::FilePath g_test_file_path; +// The location to output bad thumbnail image. If empty or invalid, fallback to +// the original location. +base::FilePath g_thumbnail_output_dir; + // Environment to store rendering thread. class VideoDecodeAcceleratorTestEnvironment; VideoDecodeAcceleratorTestEnvironment* g_env; @@ -199,7 +203,14 @@ base::FilePath GetTestDataFile(const base::FilePath& input_file) { if (input_file.IsAbsolute()) return input_file; - return base::MakeAbsoluteFilePath(g_test_file_path.Append(input_file)); + // input_file needs to be existed, otherwise base::MakeAbsoluteFilePath will + // return an empty base::FilePath. + base::FilePath abs_path = + base::MakeAbsoluteFilePath(g_test_file_path.Append(input_file)); + LOG_IF(ERROR, abs_path.empty()) + << g_test_file_path.Append(input_file).value().c_str() + << " is not an existing path."; + return abs_path; } // Read in golden MD5s for the thumbnailed rendering of this video @@ -1540,11 +1551,23 @@ LOG(ERROR) << "Unknown thumbnails MD5: " << md5_string; base::FilePath filepath(test_video_files_[0]->file_name); + if (!g_thumbnail_output_dir.empty() && + base::DirectoryExists(g_thumbnail_output_dir)) { + // Write bad thumbnails image to where --thumbnail_output_dir assigned. + filepath = g_thumbnail_output_dir.Append(filepath.BaseName()); + } else { + // Fallback to write to test data directory. + // Note: test data directory is not writable by vda_unittest while + // running by autotest. It should assign its resultsdir as output + // directory. 
+ filepath = GetTestDataFile(filepath); + } filepath = filepath.AddExtension(FILE_PATH_LITERAL(".bad_thumbnails")); filepath = filepath.AddExtension(FILE_PATH_LITERAL(".png")); - int num_bytes = - base::WriteFile(GetTestDataFile(filepath), - reinterpret_cast<char*>(&png[0]), png.size()); + LOG(INFO) << "Write bad thumbnails image to: " + << filepath.value().c_str(); + int num_bytes = base::WriteFile( + filepath, reinterpret_cast<char*>(&png[0]), png.size()); EXPECT_EQ(num_bytes, static_cast<int>(png.size())); } EXPECT_NE(match, golden_md5s.end()); @@ -1888,6 +1911,9 @@ media::g_test_file_path = media::GetTestDataFilePath(""); continue; } + if (it->first == "thumbnail_output_dir") { + media::g_thumbnail_output_dir = base::FilePath(it->second.c_str()); + } } base::ShadowingAtExitManager at_exit_manager;
diff --git a/services/ui/gpu/gpu_service.cc b/services/ui/gpu/gpu_service.cc index 496de6b..75a81a6 100644 --- a/services/ui/gpu/gpu_service.cc +++ b/services/ui/gpu/gpu_service.cc
@@ -76,7 +76,6 @@ base::WaitableEvent* shutdown_event) { DCHECK(CalledOnValidThread()); DCHECK(!gpu_host_); - gpu_host_ = std::move(gpu_host); gpu_preferences_ = preferences; gpu_info_.video_decode_accelerator_capabilities = media::GpuVideoDecodeAccelerator::GetCapabilities(gpu_preferences_); @@ -85,8 +84,9 @@ gpu_info_.jpeg_decode_accelerator_supported = media::GpuJpegDecodeAcceleratorFactoryProvider:: IsAcceleratedJpegDecodeSupported(); - gpu_host_->DidInitialize(gpu_info_); - + gpu_host->DidInitialize(gpu_info_); + gpu_host_ = + mojom::ThreadSafeGpuHostPtr::Create(gpu_host.PassInterface(), io_runner_); sync_point_manager_ = sync_point_manager; if (!sync_point_manager_) { owned_sync_point_manager_ = base::MakeUnique<gpu::SyncPointManager>(); @@ -143,35 +143,35 @@ } void GpuService::DidCreateOffscreenContext(const GURL& active_url) { - gpu_host_->DidCreateOffscreenContext(active_url); + (*gpu_host_)->DidCreateOffscreenContext(active_url); } void GpuService::DidDestroyChannel(int client_id) { media_gpu_channel_manager_->RemoveChannel(client_id); - gpu_host_->DidDestroyChannel(client_id); + (*gpu_host_)->DidDestroyChannel(client_id); } void GpuService::DidDestroyOffscreenContext(const GURL& active_url) { - gpu_host_->DidDestroyOffscreenContext(active_url); + (*gpu_host_)->DidDestroyOffscreenContext(active_url); } void GpuService::DidLoseContext(bool offscreen, gpu::error::ContextLostReason reason, const GURL& active_url) { - gpu_host_->DidLoseContext(offscreen, reason, active_url); + (*gpu_host_)->DidLoseContext(offscreen, reason, active_url); } void GpuService::StoreShaderToDisk(int client_id, const std::string& key, const std::string& shader) { - gpu_host_->StoreShaderToDisk(client_id, key, shader); + (*gpu_host_)->StoreShaderToDisk(client_id, key, shader); } #if defined(OS_WIN) void GpuService::SendAcceleratedSurfaceCreatedChildWindow( gpu::SurfaceHandle parent_window, gpu::SurfaceHandle child_window) { - gpu_host_->SetChildSurface(parent_window, child_window); + 
(*gpu_host_)->SetChildSurface(parent_window, child_window); } #endif
diff --git a/services/ui/gpu/gpu_service.h b/services/ui/gpu/gpu_service.h index 3ad7b5e..9faf513 100644 --- a/services/ui/gpu/gpu_service.h +++ b/services/ui/gpu/gpu_service.h
@@ -155,7 +155,7 @@ // Information about general chrome feature support for the GPU. gpu::GpuFeatureInfo gpu_feature_info_; - mojom::GpuHostPtr gpu_host_; + scoped_refptr<mojom::ThreadSafeGpuHostPtr> gpu_host_; std::unique_ptr<gpu::GpuChannelManager> gpu_channel_manager_; std::unique_ptr<media::MediaGpuChannelManager> media_gpu_channel_manager_;
diff --git a/services/ui/public/interfaces/window_manager.mojom b/services/ui/public/interfaces/window_manager.mojom index b129058..ce13005 100644 --- a/services/ui/public/interfaces/window_manager.mojom +++ b/services/ui/public/interfaces/window_manager.mojom
@@ -4,6 +4,7 @@ module ui.mojom; +import "cc/ipc/frame_sink_id.mojom"; import "services/ui/public/interfaces/cursor.mojom"; import "services/ui/public/interfaces/event_matcher.mojom"; import "services/ui/public/interfaces/window_manager_constants.mojom"; @@ -152,7 +153,8 @@ // to this WindowManager for |display|. WmNewDisplayAdded(display.mojom.Display display, WindowData root, - bool parent_drawn); + bool parent_drawn, + cc.mojom.FrameSinkId frame_sink_id); // Called when a display is removed. The root of the specified display is // still valid. It is expected the client calls DeleteWindow() shortly after
diff --git a/services/ui/public/interfaces/window_tree.mojom b/services/ui/public/interfaces/window_tree.mojom index 5f7d95d..e38b7f0 100644 --- a/services/ui/public/interfaces/window_tree.mojom +++ b/services/ui/public/interfaces/window_tree.mojom
@@ -4,6 +4,7 @@ module ui.mojom; +import "cc/ipc/frame_sink_id.mojom"; import "cc/ipc/local_surface_id.mojom"; import "cc/ipc/surface_info.mojom"; import "cc/ipc/mojo_compositor_frame_sink.mojom"; @@ -321,7 +322,8 @@ WindowTree? tree, int64 display_id, uint32 focused_window, - bool parent_drawn); + bool parent_drawn, + cc.mojom.FrameSinkId frame_sink_id); // Invoked when the application embedded at |window| is disconnected. In other // words the embedded app closes the connection to the server. This is called @@ -344,7 +346,8 @@ OnTopLevelCreated(uint32 change_id, WindowData data, int64 display_id, - bool parent_drawn); + bool parent_drawn, + cc.mojom.FrameSinkId frame_sink_id); // Invoked when a window's bounds have changed. Only the client embedded in // |window| gets a non_empty |local_surface_id|.
diff --git a/services/ui/ws/frame_generator.cc b/services/ui/ws/frame_generator.cc index 87f0f14..ada61585 100644 --- a/services/ui/ws/frame_generator.cc +++ b/services/ui/ws/frame_generator.cc
@@ -98,17 +98,24 @@ const gfx::Transform& transform) {} void FrameGenerator::OnBeginFrame(const cc::BeginFrameArgs& begin_frame_args) { + current_begin_frame_ack_ = cc::BeginFrameAck( + begin_frame_args.source_id, begin_frame_args.sequence_number, + begin_frame_args.sequence_number, 0, false); if (!root_window_->visible() || begin_frame_args.type == cc::BeginFrameArgs::MISSED) { + begin_frame_source_->DidFinishFrame(this, current_begin_frame_ack_); return; } + current_begin_frame_ack_.has_damage = true; + last_begin_frame_args_ = begin_frame_args; + // TODO(fsamuel): We should add a trace for generating a top level frame. cc::CompositorFrame frame(GenerateCompositorFrame(root_window_->bounds())); - compositor_frame_sink_->SubmitCompositorFrame(std::move(frame)); + + begin_frame_source_->DidFinishFrame(this, current_begin_frame_ack_); SetNeedsBeginFrame(false); - last_begin_frame_args_ = begin_frame_args; } const cc::BeginFrameArgs& FrameGenerator::LastUsedBeginFrameArgs() const { @@ -150,6 +157,7 @@ frame.render_pass_list.push_back(std::move(invert_pass)); } frame.metadata.device_scale_factor = device_scale_factor_; + frame.metadata.begin_frame_ack = current_begin_frame_ack_; if (window_manager_surface_info_.is_valid()) { frame.metadata.referenced_surfaces.push_back(
diff --git a/services/ui/ws/frame_generator.h b/services/ui/ws/frame_generator.h index f9a0647d..ea5887ea 100644 --- a/services/ui/ws/frame_generator.h +++ b/services/ui/ws/frame_generator.h
@@ -84,6 +84,7 @@ std::unique_ptr<cc::CompositorFrameSink> compositor_frame_sink_; cc::BeginFrameArgs last_begin_frame_args_; + cc::BeginFrameAck current_begin_frame_ack_; cc::BeginFrameSource* begin_frame_source_ = nullptr; bool observing_begin_frames_ = false; bool high_contrast_mode_enabled_ = false;
diff --git a/services/ui/ws/frame_generator_unittest.cc b/services/ui/ws/frame_generator_unittest.cc index b2d626e..24326998 100644 --- a/services/ui/ws/frame_generator_unittest.cc +++ b/services/ui/ws/frame_generator_unittest.cc
@@ -86,7 +86,7 @@ } void OnDidFinishFrame(const cc::BeginFrameAck& ack) override { - external_begin_frame_source_->DidFinishFrame(this, ack); + begin_frame_source_->DidFinishFrame(this, ack); } void SetBeginFrameSource(cc::BeginFrameSource* source) { @@ -181,6 +181,12 @@ return compositor_frame_sink_->last_render_pass_list(); } + const cc::BeginFrameAck& LastBeginFrameAck() { + return begin_frame_source_->LastAckForObserver(compositor_frame_sink_); + } + + ServerWindow* root_window() { return root_window_.get(); } + private: FakeCompositorFrameSink* compositor_frame_sink_ = nullptr; std::unique_ptr<cc::FakeExternalBeginFrameSource> begin_frame_source_; @@ -198,6 +204,7 @@ // FrameGenerator does not request BeginFrames upon creation. IssueBeginFrame(); EXPECT_EQ(0, NumberOfFramesReceived()); + EXPECT_EQ(cc::BeginFrameAck(), LastBeginFrameAck()); const cc::SurfaceId kArbitrarySurfaceId( cc::FrameSinkId(1, 1), @@ -218,10 +225,42 @@ EXPECT_EQ(1lu, referenced_surfaces.size()); EXPECT_EQ(kArbitrarySurfaceId, referenced_surfaces.front()); + cc::BeginFrameAck expected_ack(0, 2, 2, 0, true); + EXPECT_EQ(expected_ack, LastBeginFrameAck()); + EXPECT_EQ(expected_ack, last_metadata.begin_frame_ack); + // FrameGenerator stops requesting BeginFrames after submitting a // CompositorFrame. IssueBeginFrame(); EXPECT_EQ(1, NumberOfFramesReceived()); + EXPECT_EQ(expected_ack, LastBeginFrameAck()); +} + +TEST_F(FrameGeneratorTest, BeginFrameWhileInvisible) { + EXPECT_EQ(0, NumberOfFramesReceived()); + + // A valid SurfaceInfo is required for BeginFrame processing. + const cc::SurfaceId kArbitrarySurfaceId( + cc::FrameSinkId(1, 1), + cc::LocalSurfaceId(1, base::UnguessableToken::Create())); + const cc::SurfaceInfo kArbitrarySurfaceInfo(kArbitrarySurfaceId, 1.0f, + gfx::Size(100, 100)); + frame_generator()->OnSurfaceCreated(kArbitrarySurfaceInfo); + EXPECT_EQ(0, NumberOfFramesReceived()); + + // No frames are produced while invisible but in need of BeginFrames. 
+ root_window()->SetVisible(false); + IssueBeginFrame(); + EXPECT_EQ(0, NumberOfFramesReceived()); + EXPECT_EQ(cc::BeginFrameAck(0, 1, 1, 0, false), LastBeginFrameAck()); + + // When visible again, a frame is produced. + root_window()->SetVisible(true); + IssueBeginFrame(); + EXPECT_EQ(1, NumberOfFramesReceived()); + cc::BeginFrameAck expected_ack(0, 2, 2, 0, true); + EXPECT_EQ(expected_ack, LastBeginFrameAck()); + EXPECT_EQ(expected_ack, LastMetadata().begin_frame_ack); } TEST_F(FrameGeneratorTest, SetDeviceScaleFactor) {
diff --git a/services/ui/ws/test_change_tracker.cc b/services/ui/ws/test_change_tracker.cc index 172c9b8..afb9efb 100644 --- a/services/ui/ws/test_change_tracker.cc +++ b/services/ui/ws/test_change_tracker.cc
@@ -37,7 +37,8 @@ case CHANGE_TYPE_EMBED: if (type == ChangeDescriptionType::ONE) return "OnEmbed"; - return base::StringPrintf("OnEmbed drawn=%s", + return base::StringPrintf("OnEmbed %s drawn=%s", + change.frame_sink_id.ToString().c_str(), change.bool_value ? "true" : "false"); case CHANGE_TYPE_EMBEDDED_APP_DISCONNECTED: @@ -134,10 +135,11 @@ change.bool_value ? "true" : "false"); case CHANGE_TYPE_ON_TOP_LEVEL_CREATED: - return base::StringPrintf("TopLevelCreated id=%d window_id=%s drawn=%s", - change.change_id, - WindowIdToString(change.window_id).c_str(), - change.bool_value ? "true" : "false"); + return base::StringPrintf( + "TopLevelCreated id=%d %s window_id=%s drawn=%s", change.change_id, + change.frame_sink_id.ToString().c_str(), + WindowIdToString(change.window_id).c_str(), + change.bool_value ? "true" : "false"); case CHANGE_TYPE_OPACITY: return base::StringPrintf("OpacityChanged window_id=%s opacity=%.2f", WindowIdToString(change.window_id).c_str(), @@ -236,11 +238,13 @@ void TestChangeTracker::OnEmbed(ClientSpecificId client_id, mojom::WindowDataPtr root, - bool drawn) { + bool drawn, + const cc::FrameSinkId& frame_sink_id) { Change change; change.type = CHANGE_TYPE_EMBED; change.client_id = client_id; change.bool_value = drawn; + change.frame_sink_id = frame_sink_id; change.windows.push_back(WindowDataToTestWindow(root)); AddChange(change); } @@ -417,14 +421,17 @@ AddChange(change); } -void TestChangeTracker::OnTopLevelCreated(uint32_t change_id, - mojom::WindowDataPtr window_data, - bool drawn) { +void TestChangeTracker::OnTopLevelCreated( + uint32_t change_id, + mojom::WindowDataPtr window_data, + bool drawn, + const cc::FrameSinkId& frame_sink_id) { Change change; change.type = CHANGE_TYPE_ON_TOP_LEVEL_CREATED; change.change_id = change_id; change.window_id = window_data->window_id; change.bool_value = drawn; + change.frame_sink_id = frame_sink_id; AddChange(change); }
diff --git a/services/ui/ws/test_change_tracker.h b/services/ui/ws/test_change_tracker.h index 3421441e..1ff95cb6 100644 --- a/services/ui/ws/test_change_tracker.h +++ b/services/ui/ws/test_change_tracker.h
@@ -77,6 +77,7 @@ Id window_id3; gfx::Rect bounds; gfx::Rect bounds2; + cc::FrameSinkId frame_sink_id; base::Optional<cc::LocalSurfaceId> local_surface_id; int32_t event_action; bool matches_pointer_watcher; @@ -139,7 +140,8 @@ // WindowTreeClient function. void OnEmbed(ClientSpecificId client_id, mojom::WindowDataPtr root, - bool drawn); + bool drawn, + const cc::FrameSinkId& frame_sink_id); void OnEmbeddedAppDisconnected(Id window_id); void OnUnembed(Id window_id); void OnCaptureChanged(Id new_capture_window_id, Id old_capture_window_id); @@ -175,7 +177,8 @@ void OnChangeCompleted(uint32_t change_id, bool success); void OnTopLevelCreated(uint32_t change_id, mojom::WindowDataPtr window_data, - bool drawn); + bool drawn, + const cc::FrameSinkId& frame_sink_id); void OnWindowSurfaceChanged(Id window_id, const cc::SurfaceInfo& surface_info);
diff --git a/services/ui/ws/test_utils.cc b/services/ui/ws/test_utils.cc index 21f5af8..61d1932 100644 --- a/services/ui/ws/test_utils.cc +++ b/services/ui/ws/test_utils.cc
@@ -271,9 +271,10 @@ ui::mojom::WindowTreePtr tree, int64_t display_id, Id focused_window_id, - bool drawn) { + bool drawn, + const cc::FrameSinkId& frame_sink_id) { // TODO(sky): add test coverage of |focused_window_id|. - tracker_.OnEmbed(client_id, std::move(root), drawn); + tracker_.OnEmbed(client_id, std::move(root), drawn, frame_sink_id); } void TestWindowTreeClient::OnEmbeddedAppDisconnected(uint32_t window) { @@ -289,11 +290,13 @@ tracker_.OnCaptureChanged(new_capture_window_id, old_capture_window_id); } -void TestWindowTreeClient::OnTopLevelCreated(uint32_t change_id, - mojom::WindowDataPtr data, - int64_t display_id, - bool drawn) { - tracker_.OnTopLevelCreated(change_id, std::move(data), drawn); +void TestWindowTreeClient::OnTopLevelCreated( + uint32_t change_id, + mojom::WindowDataPtr data, + int64_t display_id, + bool drawn, + const cc::FrameSinkId& frame_sink_id) { + tracker_.OnTopLevelCreated(change_id, std::move(data), drawn, frame_sink_id); } void TestWindowTreeClient::OnWindowBoundsChanged(
diff --git a/services/ui/ws/test_utils.h b/services/ui/ws/test_utils.h index f776b36..e9a938e5 100644 --- a/services/ui/ws/test_utils.h +++ b/services/ui/ws/test_utils.h
@@ -323,7 +323,8 @@ void OnConnect(uint16_t client_id) override {} void WmNewDisplayAdded(const display::Display& display, ui::mojom::WindowDataPtr root, - bool drawn) override {} + bool drawn, + const cc::FrameSinkId& frame_sink_id) override {} void WmDisplayRemoved(int64_t display_id) override; void WmDisplayModified(const display::Display& display) override {} void WmSetBounds(uint32_t change_id, @@ -394,7 +395,8 @@ ui::mojom::WindowTreePtr tree, int64_t display_id, Id focused_window_id, - bool drawn) override; + bool drawn, + const cc::FrameSinkId& frame_sink_id) override; void OnEmbeddedAppDisconnected(uint32_t window) override; void OnUnembed(Id window_id) override; void OnCaptureChanged(Id new_capture_window_id, @@ -402,7 +404,8 @@ void OnTopLevelCreated(uint32_t change_id, mojom::WindowDataPtr data, int64_t display_id, - bool drawn) override; + bool drawn, + const cc::FrameSinkId& frame_sink_id) override; void OnWindowBoundsChanged( uint32_t window, const gfx::Rect& old_bounds,
diff --git a/services/ui/ws/window_manager_client_unittest.cc b/services/ui/ws/window_manager_client_unittest.cc index c23fbe4..dd173b62 100644 --- a/services/ui/ws/window_manager_client_unittest.cc +++ b/services/ui/ws/window_manager_client_unittest.cc
@@ -715,7 +715,8 @@ aura::WindowTreeClient second_client(connector(), this, nullptr, nullptr, nullptr, false); second_client.ConnectViaWindowTreeFactory(); - aura::WindowTreeHostMus window_tree_host_in_second_client(&second_client); + aura::WindowTreeHostMus window_tree_host_in_second_client( + &second_client, cc::FrameSinkId(1, 1)); window_tree_host_in_second_client.InitHost(); window_tree_host_in_second_client.window()->Show(); ASSERT_TRUE(second_client.GetRoots().count( @@ -744,7 +745,8 @@ aura::WindowTreeClient second_client(connector(), this, nullptr, nullptr, nullptr, false); second_client.ConnectViaWindowTreeFactory(); - aura::WindowTreeHostMus window_tree_host_in_second_client(&second_client); + aura::WindowTreeHostMus window_tree_host_in_second_client( + &second_client, cc::FrameSinkId(1, 1)); window_tree_host_in_second_client.InitHost(); window_tree_host_in_second_client.window()->Show(); aura::Window* second_client_child = NewVisibleWindow(
diff --git a/services/ui/ws/window_tree.cc b/services/ui/ws/window_tree.cc index 4db91d1..88136e1 100644 --- a/services/ui/ws/window_tree.cc +++ b/services/ui/ws/window_tree.cc
@@ -117,7 +117,8 @@ const bool drawn = root->parent() && root->parent()->IsDrawn(); client()->OnEmbed(id_, WindowToWindowData(to_send.front()), std::move(tree), - display_id, focused_window_id.id, drawn); + display_id, focused_window_id.id, drawn, + root->frame_sink_id()); } void WindowTree::ConfigureWindowManager() { @@ -200,9 +201,9 @@ Display* ws_display = GetDisplay(root); DCHECK(ws_display); - window_manager_internal_->WmNewDisplayAdded(ws_display->GetDisplay(), - WindowToWindowData(root), - root->parent()->IsDrawn()); + window_manager_internal_->WmNewDisplayAdded( + ws_display->GetDisplay(), WindowToWindowData(root), + root->parent()->IsDrawn(), root->frame_sink_id()); } void WindowTree::OnWindowDestroyingTreeImpl(WindowTree* tree) { @@ -532,7 +533,7 @@ int64_t display_id = display ? display->GetId() : display::kInvalidDisplayId; const bool drawn = window->parent() && window->parent()->IsDrawn(); client()->OnTopLevelCreated(client_change_id, WindowToWindowData(window), - display_id, drawn); + display_id, drawn, window->frame_sink_id()); } void WindowTree::AddActivationParent(const ClientWindowId& window_id) {
diff --git a/services/ui/ws/window_tree_client_unittest.cc b/services/ui/ws/window_tree_client_unittest.cc index 45254d87..eb2425f 100644 --- a/services/ui/ws/window_tree_client_unittest.cc +++ b/services/ui/ws/window_tree_client_unittest.cc
@@ -279,13 +279,14 @@ mojom::WindowTreePtr tree, int64_t display_id, Id focused_window_id, - bool drawn) override { + bool drawn, + const cc::FrameSinkId& frame_sink_id) override { // TODO(sky): add coverage of |focused_window_id|. ASSERT_TRUE(root); root_window_id_ = root->window_id; tree_ = std::move(tree); client_id_ = client_id; - tracker()->OnEmbed(client_id, std::move(root), drawn); + tracker()->OnEmbed(client_id, std::move(root), drawn, frame_sink_id); if (embed_run_loop_) embed_run_loop_->Quit(); } @@ -300,8 +301,10 @@ void OnTopLevelCreated(uint32_t change_id, mojom::WindowDataPtr data, int64_t display_id, - bool drawn) override { - tracker()->OnTopLevelCreated(change_id, std::move(data), drawn); + bool drawn, + const cc::FrameSinkId& frame_sink_id) override { + tracker()->OnTopLevelCreated(change_id, std::move(data), drawn, + frame_sink_id); } void OnWindowBoundsChanged( Id window_id, @@ -443,7 +446,8 @@ void OnConnect(uint16_t client_id) override {} void WmNewDisplayAdded(const display::Display& display, mojom::WindowDataPtr root_data, - bool drawn) override { + bool drawn, + const cc::FrameSinkId& frame_sink_id) override { NOTIMPLEMENTED(); } void WmDisplayRemoved(int64_t display_id) override { NOTIMPLEMENTED(); } @@ -646,6 +650,9 @@ } client->WaitForOnEmbed(); + // TODO(fsamuel): Currently the FrameSinkId maps directly to the server's + // window ID. This is likely bad from a security perspective and should be + // fixed. EXPECT_EQ("OnEmbed", SingleChangeToDescription(*client->tracker()->changes())); if (client_id) @@ -1729,7 +1736,9 @@ // Establish the second client at 1,2. ASSERT_NO_FATAL_FAILURE(EstablishSecondClientWithRoot(window_1_2)); - EXPECT_EQ("OnEmbed drawn=true", SingleChangeToDescription2(*changes2())); + EXPECT_EQ( + base::StringPrintf("OnEmbed FrameSinkId(%d, 0) drawn=true", window_1_2), + SingleChangeToDescription2(*changes2())); changes2()->clear(); // Show 1,2 from client 1. Client 2 should see this. 
@@ -1753,8 +1762,13 @@ ASSERT_TRUE(wt_client1()->AddWindow(window_1_1, window_1_2)); // Establish the second client at 1,2. + // TODO(fsamuel): Currently the FrameSinkId maps directly to the server's + // window ID. This is likely bad from a security perspective and should be + // fixed. ASSERT_NO_FATAL_FAILURE(EstablishSecondClientWithRoot(window_1_2)); - EXPECT_EQ("OnEmbed drawn=false", SingleChangeToDescription2(*changes2())); + EXPECT_EQ( + base::StringPrintf("OnEmbed FrameSinkId(%d, 0) drawn=false", window_1_2), + SingleChangeToDescription2(*changes2())); changes2()->clear(); // Show 1,1, drawn should be true for 1,2 (as that is all the child sees).
diff --git a/services/ui/ws/window_tree_unittest.cc b/services/ui/ws/window_tree_unittest.cc index 87f4bd0b..be17bc3 100644 --- a/services/ui/ws/window_tree_unittest.cc +++ b/services/ui/ws/window_tree_unittest.cc
@@ -701,18 +701,24 @@ static_cast<mojom::WindowManagerClient*>(wm_tree()) ->OnWmCreatedTopLevelWindow(wm_change_id, embed_window_id2.id); EXPECT_FALSE(child_binding->is_paused()); - EXPECT_EQ("TopLevelCreated id=17 window_id=" + - WindowIdToString( - WindowIdFromTransportId(embed_window_id2_in_child.id)) + - " drawn=true", - SingleChangeToDescription( - *child_binding->client()->tracker()->changes())); + ServerWindow* embed_window = wm_tree()->GetWindowByClientId(embed_window_id2); + ASSERT_TRUE(embed_window); + // TODO(fsamuel): Currently the FrameSinkId maps directly to the server's + // window ID. This is likely bad from a security perspective and should be + // fixed. + EXPECT_EQ( + base::StringPrintf( + "TopLevelCreated id=17 FrameSinkId(%d, 0) window_id=%s drawn=true", + WindowIdToTransportId(embed_window->id()), + WindowIdToString( + WindowIdFromTransportId(embed_window_id2_in_child.id)) + .c_str()), + SingleChangeToDescription( + *child_binding->client()->tracker()->changes())); child_binding->client()->tracker()->changes()->clear(); // Change the visibility of the window from the owner and make sure the // client sees the right id. - ServerWindow* embed_window = wm_tree()->GetWindowByClientId(embed_window_id2); - ASSERT_TRUE(embed_window); EXPECT_TRUE(embed_window->visible()); ASSERT_TRUE(wm_tree()->SetWindowVisibility( ClientWindowIdForWindow(wm_tree(), embed_window), false));
diff --git a/third_party/WebKit/Source/core/loader/FrameFetchContext.cpp b/third_party/WebKit/Source/core/loader/FrameFetchContext.cpp index 1f6dd37c..5f8ca3e 100644 --- a/third_party/WebKit/Source/core/loader/FrameFetchContext.cpp +++ b/third_party/WebKit/Source/core/loader/FrameFetchContext.cpp
@@ -217,6 +217,101 @@ isConnectionEffectively2G(effectiveConnection)); } +enum class RequestMethod { kIsPost, kIsNotPost }; +enum class RequestType { kIsConditional, kIsNotConditional }; +enum class ResourceType { kIsMainResource, kIsNotMainResource }; + +WebCachePolicy determineWebCachePolicy(RequestMethod method, + RequestType requestType, + ResourceType resourceType, + FrameLoadType loadType) { + switch (loadType) { + case FrameLoadTypeStandard: + return (requestType == RequestType::kIsConditional || + method == RequestMethod::kIsPost) + ? WebCachePolicy::ValidatingCacheData + : WebCachePolicy::UseProtocolCachePolicy; + case FrameLoadTypeReplaceCurrentItem: + case FrameLoadTypeInitialInChildFrame: + // TODO(toyoshim): Should be the same with FrameLoadTypeStandard, but + // keep legacy logic as is. To be changed in a follow-up patch soon. + return (resourceType == ResourceType::kIsMainResource && + (requestType == RequestType::kIsConditional || + method == RequestMethod::kIsPost)) + ? WebCachePolicy::ValidatingCacheData + : WebCachePolicy::UseProtocolCachePolicy; + case FrameLoadTypeInitialHistoryLoad: + // TODO(toyoshim): Should be the same with FrameLoadTypeBackForward, but + // keep legacy logic as is. To be changed in a follow-up patch soon. + return (resourceType == ResourceType::kIsMainResource && + (requestType == RequestType::kIsConditional || + method == RequestMethod::kIsPost)) + ? WebCachePolicy::ValidatingCacheData + : WebCachePolicy::UseProtocolCachePolicy; + case FrameLoadTypeBackForward: + // Mutates the policy for POST requests to avoid form resubmission. + return method == RequestMethod::kIsPost + ? WebCachePolicy::ReturnCacheDataDontLoad + : WebCachePolicy::ReturnCacheDataElseLoad; + case FrameLoadTypeReload: + return WebCachePolicy::ValidatingCacheData; + case FrameLoadTypeReloadMainResource: + return resourceType == ResourceType::kIsMainResource + ? 
WebCachePolicy::ValidatingCacheData + : WebCachePolicy::UseProtocolCachePolicy; + case FrameLoadTypeReloadBypassingCache: + // TODO(toyoshim): Should return BypassingCache always, but keep legacy + // logic as is. To be changed in a follow-up patch soon. + return (resourceType == ResourceType::kIsMainResource && + (requestType == RequestType::kIsConditional || + method == RequestMethod::kIsPost)) + ? WebCachePolicy::ValidatingCacheData + : WebCachePolicy::BypassingCache; + } + NOTREACHED(); + return WebCachePolicy::UseProtocolCachePolicy; +} + +// TODO(toyoshim): Remove |resourceType|. See comments in +// resourceRequestCachePolicy(). +WebCachePolicy determineFrameWebCachePolicy(Frame* frame, + ResourceType resourceType) { + if (!frame) + return WebCachePolicy::UseProtocolCachePolicy; + if (!frame->isLocalFrame()) + return determineFrameWebCachePolicy(frame->tree().parent(), resourceType); + + // Does not propagate cache policy for subresources after the load event. + // TODO(toyoshim): We should be able to remove following parents' policy check + // if each frame has a relevant FrameLoadType for reload and history + // navigations. + if (resourceType == ResourceType::kIsNotMainResource && + toLocalFrame(frame)->document()->loadEventFinished()) { + return WebCachePolicy::UseProtocolCachePolicy; + } + + // Respects BypassingCache rather than parent's policy. + // TODO(toyoshim): Adopt BypassingCache even for MainResource. + FrameLoadType loadType = + toLocalFrame(frame)->loader().documentLoader()->loadType(); + if (resourceType == ResourceType::kIsNotMainResource && + loadType == FrameLoadTypeReloadBypassingCache) { + return WebCachePolicy::BypassingCache; + } + + // Respects parent's policy if it has a special one. + WebCachePolicy parentPolicy = + determineFrameWebCachePolicy(frame->tree().parent(), resourceType); + if (parentPolicy != WebCachePolicy::UseProtocolCachePolicy) + return parentPolicy; + + // Otherwise, follows FrameLoadType. 
Use kIsNotPost, kIsNotConditional, and + // kIsNotMainResource to obtain a representative policy for the frame. + return determineWebCachePolicy(RequestMethod::kIsNotPost, + RequestType::kIsNotConditional, + ResourceType::kIsNotMainResource, loadType); +} + } // namespace FrameFetchContext::FrameFetchContext(DocumentLoader* loader, Document* document) @@ -287,118 +382,46 @@ request.setHTTPHeaderField("Save-Data", "on"); } -CachePolicy FrameFetchContext::getCachePolicy() const { - if (m_document && m_document->loadEventFinished()) - return CachePolicyVerify; - - FrameLoadType loadType = masterDocumentLoader()->loadType(); - if (loadType == FrameLoadTypeReloadBypassingCache) - return CachePolicyReload; - - Frame* parentFrame = frame()->tree().parent(); - if (parentFrame && parentFrame->isLocalFrame()) { - CachePolicy parentCachePolicy = toLocalFrame(parentFrame) - ->document() - ->fetcher() - ->context() - .getCachePolicy(); - if (parentCachePolicy != CachePolicyVerify) - return parentCachePolicy; - } - - if (loadType == FrameLoadTypeReload) - return CachePolicyRevalidate; - - if (m_documentLoader && - m_documentLoader->getRequest().getCachePolicy() == - WebCachePolicy::ReturnCacheDataElseLoad) - return CachePolicyHistoryBuffer; - - // Returns CachePolicyVerify for other cases, mainly FrameLoadTypeStandard and - // FrameLoadTypeReloadMainResource. See public/web/WebFrameLoadType.h to know - // how these load types work. 
- return CachePolicyVerify; -} - -static WebCachePolicy memoryCachePolicyToResourceRequestCachePolicy( - const CachePolicy policy) { - if (policy == CachePolicyVerify) - return WebCachePolicy::UseProtocolCachePolicy; - if (policy == CachePolicyRevalidate) - return WebCachePolicy::ValidatingCacheData; - if (policy == CachePolicyReload) - return WebCachePolicy::BypassingCache; - if (policy == CachePolicyHistoryBuffer) - return WebCachePolicy::ReturnCacheDataElseLoad; - return WebCachePolicy::UseProtocolCachePolicy; -} - -static WebCachePolicy frameLoadTypeToWebCachePolicy(FrameLoadType type) { - if (type == FrameLoadTypeBackForward) - return WebCachePolicy::ReturnCacheDataElseLoad; - if (type == FrameLoadTypeReloadBypassingCache) - return WebCachePolicy::BypassingCache; - if (type == FrameLoadTypeReload) - return WebCachePolicy::ValidatingCacheData; - return WebCachePolicy::UseProtocolCachePolicy; -} - WebCachePolicy FrameFetchContext::resourceRequestCachePolicy( ResourceRequest& request, Resource::Type type, FetchRequest::DeferOption defer) const { DCHECK(frame()); if (type == Resource::MainResource) { - FrameLoadType frameLoadType = masterDocumentLoader()->loadType(); - if (request.httpMethod() == "POST" && - frameLoadType == FrameLoadTypeBackForward) - return WebCachePolicy::ReturnCacheDataDontLoad; - if (frameLoadType == FrameLoadTypeReloadMainResource || - request.isConditional() || request.httpMethod() == "POST") - return WebCachePolicy::ValidatingCacheData; - - WebCachePolicy policy = frameLoadTypeToWebCachePolicy(frameLoadType); - if (policy != WebCachePolicy::UseProtocolCachePolicy) - return policy; - - for (Frame* f = frame()->tree().parent(); f; f = f->tree().parent()) { - if (!f->isLocalFrame()) - continue; - policy = frameLoadTypeToWebCachePolicy( - toLocalFrame(f)->loader().documentLoader()->loadType()); - if (policy != WebCachePolicy::UseProtocolCachePolicy) - return policy; - } - // Returns UseProtocolCachePolicy for other cases, parent frames not 
having - // special kinds of FrameLoadType as they are checked inside the for loop - // above, or |frameLoadType| being FrameLoadTypeStandard. See - // public/web/WebFrameLoadType.h to know how these load types work. - return WebCachePolicy::UseProtocolCachePolicy; + const WebCachePolicy cachePolicy = determineWebCachePolicy( + request.httpMethod() == "POST" ? RequestMethod::kIsPost + : RequestMethod::kIsNotPost, + request.isConditional() ? RequestType::kIsConditional + : RequestType::kIsNotConditional, + ResourceType::kIsMainResource, masterDocumentLoader()->loadType()); + // Follows the parent frame's policy. + // TODO(toyoshim): Probably, FrameLoadType for each frame should have a + // right type for reload or history navigations, and should not need to + // check parent's frame policy here. Once it has a right FrameLoadType, + // we can remove Resource::Type argument from determineFrameWebCachePolicy. + // See also crbug.com/332602. + if (cachePolicy != WebCachePolicy::UseProtocolCachePolicy) + return cachePolicy; + return determineFrameWebCachePolicy(frame()->tree().parent(), + ResourceType::kIsMainResource); } // For users on slow connections, we want to avoid blocking the parser in // the main frame on script loads inserted via document.write, since it can // add significant delays before page content is displayed on the screen. + // TODO(toyoshim): Move following logic that rewrites ResourceRequest to + // somewhere that should be relevant to the script resource handling. if (type == Resource::Script && isMainFrame() && m_document && shouldDisallowFetchForMainFrameScript(request, defer, *m_document)) return WebCachePolicy::ReturnCacheDataDontLoad; + // TODO(toyoshim): We should check isConditional() and use ValidatingCacheData + // only when |cachePolicy| below is UseProtocolCachePolicy. 
if (request.isConditional()) return WebCachePolicy::ValidatingCacheData; - if (m_documentLoader && m_document && !m_document->loadEventFinished()) { - // For POST requests, we mutate the main resource's cache policy to avoid - // form resubmission. This policy should not be inherited by subresources. - WebCachePolicy mainResourceCachePolicy = - m_documentLoader->getRequest().getCachePolicy(); - if (m_documentLoader->getRequest().httpMethod() == "POST") { - if (mainResourceCachePolicy == WebCachePolicy::ReturnCacheDataDontLoad) - return WebCachePolicy::ReturnCacheDataElseLoad; - return WebCachePolicy::UseProtocolCachePolicy; - } - return memoryCachePolicyToResourceRequestCachePolicy(getCachePolicy()); - } - return WebCachePolicy::UseProtocolCachePolicy; + return determineFrameWebCachePolicy(frame(), + ResourceType::kIsNotMainResource); } // The |m_documentLoader| is null in the FrameFetchContext of an imported
diff --git a/third_party/WebKit/Source/core/loader/FrameFetchContext.h b/third_party/WebKit/Source/core/loader/FrameFetchContext.h index b087a7c..b30cecf4f 100644 --- a/third_party/WebKit/Source/core/loader/FrameFetchContext.h +++ b/third_party/WebKit/Source/core/loader/FrameFetchContext.h
@@ -74,7 +74,6 @@ void addAdditionalRequestHeaders(ResourceRequest&, FetchResourceType) override; - CachePolicy getCachePolicy() const override; WebCachePolicy resourceRequestCachePolicy( ResourceRequest&, Resource::Type,
diff --git a/third_party/WebKit/Source/platform/loader/fetch/CachePolicy.h b/third_party/WebKit/Source/platform/loader/fetch/CachePolicy.h deleted file mode 100644 index a4e53d1..0000000 --- a/third_party/WebKit/Source/platform/loader/fetch/CachePolicy.h +++ /dev/null
@@ -1,40 +0,0 @@ -/* - * Copyright (C) 2003, 2006 Apple Computer, Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef CachePolicy_h -#define CachePolicy_h - -namespace blink { - -// TODO(crbug.com/599364): Should be merged with other CachePolicies. -enum CachePolicy { - CachePolicyVerify, - CachePolicyRevalidate, - CachePolicyReload, - CachePolicyHistoryBuffer -}; -} - -#endif
diff --git a/third_party/WebKit/Source/platform/loader/fetch/FetchContext.cpp b/third_party/WebKit/Source/platform/loader/fetch/FetchContext.cpp index 00b0a35a..c9ccce9 100644 --- a/third_party/WebKit/Source/platform/loader/fetch/FetchContext.cpp +++ b/third_party/WebKit/Source/platform/loader/fetch/FetchContext.cpp
@@ -46,10 +46,6 @@ void FetchContext::addAdditionalRequestHeaders(ResourceRequest&, FetchResourceType) {} -CachePolicy FetchContext::getCachePolicy() const { - return CachePolicyVerify; -} - WebCachePolicy FetchContext::resourceRequestCachePolicy( ResourceRequest&, Resource::Type,
diff --git a/third_party/WebKit/Source/platform/loader/fetch/FetchContext.h b/third_party/WebKit/Source/platform/loader/fetch/FetchContext.h index e206102..921f5cbc 100644 --- a/third_party/WebKit/Source/platform/loader/fetch/FetchContext.h +++ b/third_party/WebKit/Source/platform/loader/fetch/FetchContext.h
@@ -33,7 +33,6 @@ #include "platform/PlatformExport.h" #include "platform/heap/Handle.h" -#include "platform/loader/fetch/CachePolicy.h" #include "platform/loader/fetch/FetchInitiatorInfo.h" #include "platform/loader/fetch/FetchRequest.h" #include "platform/loader/fetch/Resource.h" @@ -78,7 +77,7 @@ virtual bool isLiveContext() { return false; } virtual void addAdditionalRequestHeaders(ResourceRequest&, FetchResourceType); - virtual CachePolicy getCachePolicy() const; + // Returns the cache policy for the resource. ResourceRequest is not passed as // a const reference as a header needs to be added for doc.write blocking // intervention.
diff --git a/third_party/WebKit/Source/platform/mediastream/MediaStreamSource.cpp b/third_party/WebKit/Source/platform/mediastream/MediaStreamSource.cpp index ca12e1c..da4b301 100644 --- a/third_party/WebKit/Source/platform/mediastream/MediaStreamSource.cpp +++ b/third_party/WebKit/Source/platform/mediastream/MediaStreamSource.cpp
@@ -107,9 +107,6 @@ void MediaStreamSource::consumeAudio(AudioBus* bus, size_t numberOfFrames) { ASSERT(m_requiresConsumer); MutexLocker locker(m_audioConsumersLock); - // Prevent GCs from going ahead while this iteration runs, attempting to - // pinpoint crbug.com/682945 failures. - ThreadState::MainThreadGCForbiddenScope scope; for (AudioDestinationConsumer* consumer : m_audioConsumers) consumer->consumeAudio(bus, numberOfFrames); }
diff --git a/third_party/WebKit/Source/platform/wtf/BUILD.gn b/third_party/WebKit/Source/platform/wtf/BUILD.gn index 0aaac6d..b073674 100644 --- a/third_party/WebKit/Source/platform/wtf/BUILD.gn +++ b/third_party/WebKit/Source/platform/wtf/BUILD.gn
@@ -68,6 +68,8 @@ "Assertions.h", "Atomics.h", "AutoReset.h", + "BitVector.cpp", + "BitVector.h", "BitwiseOperations.h", "ByteSwap.h", "CPU.h", @@ -79,20 +81,36 @@ "CryptographicallyRandomNumber.h", "CurrentTime.cpp", "CurrentTime.h", + "DataLog.cpp", + "DataLog.h", + "Deque.h", + "DoublyLinkedList.h", "DynamicAnnotations.cpp", "DynamicAnnotations.h", + "FilePrintStream.h", "Forward.h", "Functional.h", "GetPtr.h", + "HashCountedSet.h", + "HashFunctions.h", + "HashIterators.h", + "HashMap.h", + "HashSet.h", + "HashTable.cpp", + "HashTable.h", "HashTableDeletedValueType.h", + "HashTraits.h", "InstanceCounter.h", "LeakAnnotations.h", + "LinkedHashSet.h", + "ListHashSet.h", "Locker.h", "NonCopyingSort.h", "Noncopyable.h", "NotFound.h", "Optional.h", "PassRefPtr.h", + "PrintStream.h", "PtrUtil.h", "RefCounted.h", "RefPtr.h", @@ -111,6 +129,8 @@ "TreeNode.h", "TriState.h", "TypeTraits.h", + "Vector.h", + "VectorTraits.h", "WTF.h", "WTFExport.h", "WeakPtr.h",
diff --git a/third_party/WebKit/Source/wtf/BitVector.cpp b/third_party/WebKit/Source/platform/wtf/BitVector.cpp similarity index 96% rename from third_party/WebKit/Source/wtf/BitVector.cpp rename to third_party/WebKit/Source/platform/wtf/BitVector.cpp index da7dcaf..0837b58 100644 --- a/third_party/WebKit/Source/wtf/BitVector.cpp +++ b/third_party/WebKit/Source/platform/wtf/BitVector.cpp
@@ -23,11 +23,11 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "wtf/BitVector.h" +#include "platform/wtf/BitVector.h" -#include "wtf/LeakAnnotations.h" -#include "wtf/PrintStream.h" -#include "wtf/allocator/Partitions.h" +#include "platform/wtf/LeakAnnotations.h" +#include "platform/wtf/PrintStream.h" +#include "platform/wtf/allocator/Partitions.h" #include <algorithm> #include <string.h>
diff --git a/third_party/WebKit/Source/platform/wtf/BitVector.h b/third_party/WebKit/Source/platform/wtf/BitVector.h new file mode 100644 index 0000000..e4354e5 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/BitVector.h
@@ -0,0 +1,227 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BitVector_h +#define BitVector_h + +#include "platform/wtf/Allocator.h" +#include "platform/wtf/Assertions.h" +#include "platform/wtf/StdLibExtras.h" +#include "platform/wtf/WTFExport.h" + +namespace WTF { + +class PrintStream; + +// This is a space-efficient, resizeable bitvector class. In the common case it +// occupies one word, but if necessary, it will inflate this one word to point +// to a single chunk of out-of-line allocated storage to store an arbitrary +// number of bits. 
//
// - The bitvector remembers the bound of how many bits can be stored, but this
//   may be slightly greater (by as much as some platform-specific constant)
//   than the last argument passed to ensureSize().
//
// - The bitvector can resize itself automatically (set, clear, get) or can be
//   used in a manual mode, which is faster (quickSet, quickClear, quickGet,
//   ensureSize).
//
// - Accesses assert that you are within bounds.
//
// - Bits are automatically initialized to zero.
//
// On the other hand, this BitVector class may not be the fastest around, since
// it does conditionals on every get/set/clear. But it is great if you need to
// juggle a lot of variable-length BitVectors and you're worried about wasting
// space.

class WTF_EXPORT BitVector {
  DISALLOW_NEW();

 public:
  // Starts out empty (zero capacity), stored inline.
  BitVector() : m_bitsOrPointer(makeInlineBits(0)) {}

  // Creates a vector that can hold at least |numBits| bits, all zero.
  explicit BitVector(size_t numBits) : m_bitsOrPointer(makeInlineBits(0)) {
    ensureSize(numBits);
  }

  BitVector(const BitVector& other) : m_bitsOrPointer(makeInlineBits(0)) {
    (*this) = other;
  }

  ~BitVector() {
    if (isInline())
      return;
    OutOfLineBits::destroy(outOfLineBits());
  }

  BitVector& operator=(const BitVector& other) {
    // Inline-to-inline assignment is a single word copy; anything involving
    // an out-of-line buffer goes through the out-of-line slow path.
    if (isInline() && other.isInline())
      m_bitsOrPointer = other.m_bitsOrPointer;
    else
      setSlow(other);
    return *this;
  }

  // Current capacity in bits (may exceed what was requested; see header
  // comment above).
  size_t size() const {
    if (isInline())
      return maxInlineBits();
    return outOfLineBits()->numBits();
  }

  // Grows capacity to at least |numBits|; never shrinks.
  void ensureSize(size_t numBits) {
    if (numBits <= size())
      return;
    resizeOutOfLine(numBits);
  }

  // Like ensureSize(), but supports reducing the size of the bitvector.
  void resize(size_t numBits);

  void clearAll();

  // quick* variants do no resizing; callers must stay within size().
  bool quickGet(size_t bit) const {
    SECURITY_CHECK(bit < size());
    return !!(bits()[bit / bitsInPointer()] &
              (static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1))));
  }

  void quickSet(size_t bit) {
    SECURITY_CHECK(bit < size());
    bits()[bit / bitsInPointer()] |=
        (static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1)));
  }

  void quickClear(size_t bit) {
    SECURITY_CHECK(bit < size());
    bits()[bit / bitsInPointer()] &=
        ~(static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1)));
  }

  void quickSet(size_t bit, bool value) {
    if (value)
      quickSet(bit);
    else
      quickClear(bit);
  }

  // Auto-resizing accessors: out-of-range reads return false, out-of-range
  // sets grow the vector, out-of-range clears are no-ops.
  bool get(size_t bit) const {
    if (bit >= size())
      return false;
    return quickGet(bit);
  }

  void set(size_t bit) {
    ensureSize(bit + 1);
    quickSet(bit);
  }

  void ensureSizeAndSet(size_t bit, size_t size) {
    ensureSize(size);
    quickSet(bit);
  }

  void clear(size_t bit) {
    if (bit >= size())
      return;
    quickClear(bit);
  }

  void set(size_t bit, bool value) {
    if (value)
      set(bit);
    else
      clear(bit);
  }

  void dump(PrintStream& out);

 private:
  static unsigned bitsInPointer() { return sizeof(void*) << 3; }

  // One bit of the word is reserved as the inline/out-of-line tag, so only
  // bitsInPointer() - 1 bits can be stored inline.
  static unsigned maxInlineBits() { return bitsInPointer() - 1; }

  static size_t byteCount(size_t bitCount) { return (bitCount + 7) >> 3; }

  // Tags |bits| as inline storage by setting the top bit.
  static uintptr_t makeInlineBits(uintptr_t bits) {
    DCHECK(!(bits & (static_cast<uintptr_t>(1) << maxInlineBits())));
    return bits | (static_cast<uintptr_t>(1) << maxInlineBits());
  }

  // Heap-allocated storage: this header is immediately followed by the bit
  // words (see bits(), which points just past |this|).
  class WTF_EXPORT OutOfLineBits {
    DISALLOW_NEW_EXCEPT_PLACEMENT_NEW();

   public:
    size_t numBits() const { return m_numBits; }
    size_t numWords() const {
      return (m_numBits + bitsInPointer() - 1) / bitsInPointer();
    }
    uintptr_t* bits() { return bitwiseCast<uintptr_t*>(this + 1); }
    const uintptr_t* bits() const {
      return bitwiseCast<const uintptr_t*>(this + 1);
    }

    static OutOfLineBits* create(size_t numBits);

    static void destroy(OutOfLineBits*);

   private:
    OutOfLineBits(size_t numBits) : m_numBits(numBits) {}

    size_t m_numBits;
  };

  bool isInline() const { return m_bitsOrPointer >> maxInlineBits(); }

  // In out-of-line mode the pointer is stored shifted right by one so that
  // the tag bit (top bit) is guaranteed clear; shift left to recover it.
  const OutOfLineBits* outOfLineBits() const {
    return bitwiseCast<const OutOfLineBits*>(m_bitsOrPointer << 1);
  }
  OutOfLineBits* outOfLineBits() {
    return bitwiseCast<OutOfLineBits*>(m_bitsOrPointer << 1);
  }

  void resizeOutOfLine(size_t numBits);
  void setSlow(const BitVector& other);

  uintptr_t* bits() {
    if (isInline())
      return &m_bitsOrPointer;
    return outOfLineBits()->bits();
  }

  const uintptr_t* bits() const {
    if (isInline())
      return &m_bitsOrPointer;
    return outOfLineBits()->bits();
  }

  // Tagged word: top bit set => low bits hold the data inline; top bit
  // clear => holds an OutOfLineBits* shifted right by one.
  uintptr_t m_bitsOrPointer;
};

}  // namespace WTF

using WTF::BitVector;

#endif  // BitVector_h
diff --git a/third_party/WebKit/Source/wtf/DataLog.cpp b/third_party/WebKit/Source/platform/wtf/DataLog.cpp similarity index 98% rename from third_party/WebKit/Source/wtf/DataLog.cpp rename to third_party/WebKit/Source/platform/wtf/DataLog.cpp index f70534c..a59f137 100644 --- a/third_party/WebKit/Source/wtf/DataLog.cpp +++ b/third_party/WebKit/Source/platform/wtf/DataLog.cpp
@@ -23,7 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include "wtf/DataLog.h" +#include "platform/wtf/DataLog.h" #if OS(POSIX) #include <pthread.h>
diff --git a/third_party/WebKit/Source/platform/wtf/DataLog.h b/third_party/WebKit/Source/platform/wtf/DataLog.h new file mode 100644 index 0000000..1aa94b6 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/DataLog.h
@@ -0,0 +1,54 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DataLog_h +#define DataLog_h + +#include "platform/wtf/Assertions.h" +#include "platform/wtf/Compiler.h" +#include "platform/wtf/FilePrintStream.h" +#include "platform/wtf/WTFExport.h" + +#include <stdarg.h> +#include <stdio.h> + +namespace WTF { + +FilePrintStream& dataFile(); + +WTF_EXPORT PRINTF_FORMAT(1, 0) void dataLogFV(const char* format, va_list); +WTF_EXPORT PRINTF_FORMAT(1, 2) void dataLogF(const char* format, ...); + +template <typename... T> +void dataLog(const T&... values) { + dataFile().print(values...); +} + +} // namespace WTF + +using WTF::dataLog; +using WTF::dataLogF; + +#endif // DataLog_h
diff --git a/third_party/WebKit/Source/platform/wtf/Deque.h b/third_party/WebKit/Source/platform/wtf/Deque.h new file mode 100644 index 0000000..0b1e27d --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/Deque.h
@@ -0,0 +1,706 @@ +/* + * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. + * Copyright (C) 2009 Google Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef WTF_Deque_h +#define WTF_Deque_h + +// FIXME: Could move what Vector and Deque share into a separate file. +// Deque doesn't actually use Vector. 

#include "platform/wtf/Vector.h"
#include <iterator>

namespace WTF {

template <typename T, size_t inlineCapacity, typename Allocator>
class DequeIteratorBase;
template <typename T, size_t inlineCapacity, typename Allocator>
class DequeIterator;
template <typename T, size_t inlineCapacity, typename Allocator>
class DequeConstIterator;

// Double-ended queue implemented as a circular buffer on top of
// VectorBuffer. Invariant: elements occupy [m_start, m_end) modulo
// capacity; m_start == m_end means empty, so one slot is always kept free
// to distinguish empty from full (see expandCapacityIfNeeded()).
template <typename T,
          size_t inlineCapacity = 0,
          typename Allocator = PartitionAllocator>
class Deque : public ConditionalDestructor<Deque<T, INLINE_CAPACITY, Allocator>,
                                           (INLINE_CAPACITY == 0) &&
                                               Allocator::isGarbageCollected> {
  USE_ALLOCATOR(Deque, Allocator);

 public:
  typedef DequeIterator<T, inlineCapacity, Allocator> iterator;
  typedef DequeConstIterator<T, inlineCapacity, Allocator> const_iterator;
  typedef std::reverse_iterator<iterator> reverse_iterator;
  typedef std::reverse_iterator<const_iterator> const_reverse_iterator;

  Deque();
  Deque(const Deque&);
  Deque& operator=(const Deque&);
  Deque(Deque&&);
  Deque& operator=(Deque&&);

  void finalize();
  void finalizeGarbageCollectedObject() { finalize(); }

  void swap(Deque&);

  size_t size() const {
    // Wrapped layout (m_end < m_start) spans the seam of the ring buffer.
    return m_start <= m_end ? m_end - m_start
                            : m_end + m_buffer.capacity() - m_start;
  }
  bool isEmpty() const { return m_start == m_end; }

  iterator begin() { return iterator(this, m_start); }
  iterator end() { return iterator(this, m_end); }
  const_iterator begin() const { return const_iterator(this, m_start); }
  const_iterator end() const { return const_iterator(this, m_end); }
  reverse_iterator rbegin() { return reverse_iterator(end()); }
  reverse_iterator rend() { return reverse_iterator(begin()); }
  const_reverse_iterator rbegin() const {
    return const_reverse_iterator(end());
  }
  const_reverse_iterator rend() const {
    return const_reverse_iterator(begin());
  }

  T& front() {
    DCHECK_NE(m_start, m_end);
    return m_buffer.buffer()[m_start];
  }
  const T& front() const {
    DCHECK_NE(m_start, m_end);
    return m_buffer.buffer()[m_start];
  }
  // Moves the first element out and pops it.
  T takeFirst();

  T& back() {
    DCHECK_NE(m_start, m_end);
    return *(--end());
  }
  const T& back() const {
    DCHECK_NE(m_start, m_end);
    return *(--end());
  }
  // Moves the last element out and pops it.
  T takeLast();

  T& at(size_t i) {
    RELEASE_ASSERT(i < size());
    // |right| is the number of slots between m_start and the physical end
    // of the buffer; indices beyond that wrap to the front.
    size_t right = m_buffer.capacity() - m_start;
    return i < right ? m_buffer.buffer()[m_start + i]
                     : m_buffer.buffer()[i - right];
  }
  const T& at(size_t i) const {
    RELEASE_ASSERT(i < size());
    size_t right = m_buffer.capacity() - m_start;
    return i < right ? m_buffer.buffer()[m_start + i]
                     : m_buffer.buffer()[i - right];
  }

  T& operator[](size_t i) { return at(i); }
  const T& operator[](size_t i) const { return at(i); }

  template <typename U>
  void push_front(U&&);
  void erase(iterator&);
  void erase(const_iterator&);

  // STL compatibility.
  template <typename U>
  void push_back(U&&);
  void pop_back();
  void pop_front();
  bool empty() const { return isEmpty(); }
  template <typename... Args>
  void emplace_back(Args&&...);
  template <typename... Args>
  void emplace_front(Args&&...);

  void clear();

  template <typename VisitorDispatcher>
  void trace(VisitorDispatcher);

  static_assert(!std::is_polymorphic<T>::value ||
                    !VectorTraits<T>::canInitializeWithMemset,
                "Cannot initialize with memset if there is a vtable");
  static_assert(Allocator::isGarbageCollected ||
                    !AllowsOnlyPlacementNew<T>::value ||
                    !IsTraceable<T>::value,
                "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that "
                "have trace methods into an off-heap Deque");
  static_assert(Allocator::isGarbageCollected ||
                    !IsPointerToGarbageCollectedType<T>::value,
                "Cannot put raw pointers to garbage-collected classes into a "
                "Deque. Use HeapDeque<Member<T>> instead.");

 private:
  friend class DequeIteratorBase<T, inlineCapacity, Allocator>;

  // Thin wrapper over VectorBuffer that exposes a setter for the protected
  // m_size member; swap() needs to set it before swapVectorBuffer().
  class BackingBuffer : public VectorBuffer<T, INLINE_CAPACITY, Allocator> {
    WTF_MAKE_NONCOPYABLE(BackingBuffer);

   private:
    using Base = VectorBuffer<T, INLINE_CAPACITY, Allocator>;
    using Base::m_size;

   public:
    BackingBuffer() : Base() {}
    explicit BackingBuffer(size_t capacity) : Base(capacity) {}

    void setSize(size_t size) { m_size = size; }
  };

  typedef VectorTypeOperations<T> TypeOperations;
  typedef DequeIteratorBase<T, inlineCapacity, Allocator> IteratorBase;

  void erase(size_t position);
  void destroyAll();
  void expandCapacityIfNeeded();
  void expandCapacity();

  BackingBuffer m_buffer;
  unsigned m_start;  // Index of the first element.
  unsigned m_end;    // Index one past the last element (mod capacity).
};

// Shared state and movement logic for the const and non-const iterators.
template <typename T, size_t inlineCapacity, typename Allocator>
class DequeIteratorBase {
  DISALLOW_NEW();

 protected:
  DequeIteratorBase();
  DequeIteratorBase(const Deque<T, inlineCapacity, Allocator>*, size_t);
  DequeIteratorBase(const DequeIteratorBase&);
  DequeIteratorBase& operator=(const DequeIteratorBase<T, 0, Allocator>&);
  ~DequeIteratorBase();

  void assign(const DequeIteratorBase& other) { *this = other; }

  void increment();
  void decrement();

  T* before() const;
  T* after() const;

  bool isEqual(const DequeIteratorBase&) const;

 private:
  Deque<T, inlineCapacity, Allocator>* m_deque;
  unsigned m_index;  // Physical buffer index, not logical position.

  friend class Deque<T, inlineCapacity, Allocator>;
};

template <typename T,
          size_t inlineCapacity = 0,
          typename Allocator = PartitionAllocator>
class DequeIterator : public DequeIteratorBase<T, inlineCapacity, Allocator> {
 private:
  typedef DequeIteratorBase<T, inlineCapacity, Allocator> Base;
  typedef DequeIterator<T, inlineCapacity, Allocator> Iterator;

 public:
  typedef ptrdiff_t difference_type;
  typedef T value_type;
  typedef T* pointer;
  typedef T& reference;
  typedef std::bidirectional_iterator_tag iterator_category;

  DequeIterator(Deque<T, inlineCapacity, Allocator>* deque, size_t index)
      : Base(deque, index) {}

  DequeIterator(const Iterator& other) : Base(other) {}
  DequeIterator& operator=(const Iterator& other) {
    Base::assign(other);
    return *this;
  }

  T& operator*() const { return *Base::after(); }
  T* operator->() const { return Base::after(); }

  bool operator==(const Iterator& other) const { return Base::isEqual(other); }
  bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }

  Iterator& operator++() {
    Base::increment();
    return *this;
  }
  // postfix ++ intentionally omitted
  Iterator& operator--() {
    Base::decrement();
    return *this;
  }
  // postfix -- intentionally omitted
};

template <typename T,
          size_t inlineCapacity = 0,
          typename Allocator = PartitionAllocator>
class DequeConstIterator
    : public DequeIteratorBase<T, inlineCapacity, Allocator> {
 private:
  typedef DequeIteratorBase<T, inlineCapacity, Allocator> Base;
  typedef DequeConstIterator<T, inlineCapacity, Allocator> Iterator;
  typedef DequeIterator<T, inlineCapacity, Allocator> NonConstIterator;

 public:
  typedef ptrdiff_t difference_type;
  typedef T value_type;
  typedef const T* pointer;
  typedef const T& reference;
  typedef std::bidirectional_iterator_tag iterator_category;

  DequeConstIterator(const Deque<T, inlineCapacity, Allocator>* deque,
                     size_t index)
      : Base(deque, index) {}

  DequeConstIterator(const Iterator& other) : Base(other) {}
  DequeConstIterator(const NonConstIterator& other) : Base(other) {}
  DequeConstIterator& operator=(const Iterator& other) {
    Base::assign(other);
    return *this;
  }
  DequeConstIterator& operator=(const NonConstIterator& other) {
    Base::assign(other);
    return *this;
  }

  const T& operator*() const { return *Base::after(); }
  const T* operator->() const { return Base::after(); }

  bool operator==(const Iterator& other) const { return Base::isEqual(other); }
  bool operator!=(const Iterator& other) const { return !Base::isEqual(other); }

  Iterator& operator++() {
    Base::increment();
    return *this;
  }
  // postfix ++ intentionally omitted
  Iterator& operator--() {
    Base::decrement();
    return *this;
  }
  // postfix -- intentionally omitted
};

template <typename T, size_t inlineCapacity, typename Allocator>
inline Deque<T, inlineCapacity, Allocator>::Deque() : m_start(0), m_end(0) {}

template <typename T, size_t inlineCapacity, typename Allocator>
inline Deque<T, inlineCapacity, Allocator>::Deque(const Deque& other)
    : m_buffer(other.m_buffer.capacity()),
      m_start(other.m_start),
      m_end(other.m_end) {
  // Copy element-by-element segment-wise, preserving the source's physical
  // layout (including the wrap, if any).
  const T* otherBuffer = other.m_buffer.buffer();
  if (m_start <= m_end) {
    TypeOperations::uninitializedCopy(otherBuffer + m_start,
                                      otherBuffer + m_end,
                                      m_buffer.buffer() + m_start);
  } else {
    TypeOperations::uninitializedCopy(otherBuffer, otherBuffer + m_end,
                                      m_buffer.buffer());
    TypeOperations::uninitializedCopy(otherBuffer + m_start,
                                      otherBuffer + m_buffer.capacity(),
                                      m_buffer.buffer() + m_start);
  }
}

// Copy-and-swap assignment.
// NOTE(review): `Deque<T>` here drops the inlineCapacity/Allocator template
// arguments, so this only instantiates for the default arguments; presumably
// copy-assignment is never used with a non-zero inline capacity — confirm,
// or spell this `Deque copy(other);`.
template <typename T, size_t inlineCapacity, typename Allocator>
inline Deque<T, inlineCapacity, Allocator>&
Deque<T, inlineCapacity, Allocator>::operator=(const Deque& other) {
  Deque<T> copy(other);
  swap(copy);
  return *this;
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline Deque<T, inlineCapacity, Allocator>::Deque(Deque&& other)
    : m_start(0), m_end(0) {
  swap(other);
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline Deque<T, inlineCapacity, Allocator>&
Deque<T, inlineCapacity, Allocator>::operator=(Deque&& other) {
  swap(other);
  return *this;
}

// Destroys every live element (both segments when wrapped) and poisons the
// vacated slots via clearUnusedSlots.
template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::destroyAll() {
  if (m_start <= m_end) {
    TypeOperations::destruct(m_buffer.buffer() + m_start,
                             m_buffer.buffer() + m_end);
    m_buffer.clearUnusedSlots(m_buffer.buffer() + m_start,
                              m_buffer.buffer() + m_end);
  } else {
    TypeOperations::destruct(m_buffer.buffer(), m_buffer.buffer() + m_end);
    m_buffer.clearUnusedSlots(m_buffer.buffer(), m_buffer.buffer() + m_end);
    TypeOperations::destruct(m_buffer.buffer() + m_start,
                             m_buffer.buffer() + m_buffer.capacity());
    m_buffer.clearUnusedSlots(m_buffer.buffer() + m_start,
                              m_buffer.buffer() + m_buffer.capacity());
  }
}

// Off-GC-heap deques: Destructor should be called.
// On-GC-heap deques: Destructor should be called for inline buffers (if any)
// but destructor shouldn't be called for vector backing since it is managed by
// the traced GC heap.
template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::finalize() {
  if (!INLINE_CAPACITY && !m_buffer.buffer())
    return;
  if (!isEmpty() &&
      !(Allocator::isGarbageCollected && m_buffer.hasOutOfLineBuffer()))
    destroyAll();

  m_buffer.destruct();
}

// Swaps contents with |other|. Each side describes its unused region (the
// "hole" of the circular buffer) as an OffsetRange so swapVectorBuffer can
// skip uninitialized slots when the buffers are inline.
template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::swap(Deque& other) {
  typename BackingBuffer::OffsetRange thisHole;
  if (m_start <= m_end) {
    m_buffer.setSize(m_end);
    thisHole.begin = 0;
    thisHole.end = m_start;
  } else {
    m_buffer.setSize(m_buffer.capacity());
    thisHole.begin = m_end;
    thisHole.end = m_start;
  }
  typename BackingBuffer::OffsetRange otherHole;
  if (other.m_start <= other.m_end) {
    other.m_buffer.setSize(other.m_end);
    otherHole.begin = 0;
    otherHole.end = other.m_start;
  } else {
    other.m_buffer.setSize(other.m_buffer.capacity());
    otherHole.begin = other.m_end;
    otherHole.end = other.m_start;
  }

  m_buffer.swapVectorBuffer(other.m_buffer, thisHole, otherHole);

  std::swap(m_start, other.m_start);
  std::swap(m_end, other.m_end);
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::clear() {
  destroyAll();
  m_start = 0;
  m_end = 0;
  // Also releases the heap backing, unlike destroyAll() alone.
  m_buffer.deallocateBuffer(m_buffer.buffer());
  m_buffer.resetBufferPointer();
}

// Grows only when exactly one free slot remains (the slot that keeps
// "full" distinguishable from "empty"), or when there is no buffer yet.
template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::expandCapacityIfNeeded() {
  if (m_start) {
    if (m_end + 1 != m_start)
      return;
  } else if (m_end) {
    if (m_end != m_buffer.capacity() - 1)
      return;
  } else if (m_buffer.capacity()) {
    return;
  }

  expandCapacity();
}

template <typename T, size_t inlineCapacity, typename Allocator>
void Deque<T, inlineCapacity, Allocator>::expandCapacity() {
  size_t oldCapacity = m_buffer.capacity();
  T* oldBuffer = m_buffer.buffer();
  // Growth factor 1.25x, with a floor of 16 slots.
  size_t newCapacity =
      std::max(static_cast<size_t>(16), oldCapacity + oldCapacity / 4 + 1);
  if (m_buffer.expandBuffer(newCapacity)) {
    // In-place expansion succeeded; if wrapped, slide the tail segment to
    // the new physical end of the buffer.
    if (m_start <= m_end) {
      // No adjustments to be done.
    } else {
      size_t newStart = m_buffer.capacity() - (oldCapacity - m_start);
      TypeOperations::moveOverlapping(oldBuffer + m_start,
                                      oldBuffer + oldCapacity,
                                      m_buffer.buffer() + newStart);
      m_buffer.clearUnusedSlots(oldBuffer + m_start,
                                oldBuffer + std::min(oldCapacity, newStart));
      m_start = newStart;
    }
    return;
  }
  // Otherwise allocate fresh storage and move both segments across.
  m_buffer.allocateBuffer(newCapacity);
  if (m_start <= m_end) {
    TypeOperations::move(oldBuffer + m_start, oldBuffer + m_end,
                         m_buffer.buffer() + m_start);
    m_buffer.clearUnusedSlots(oldBuffer + m_start, oldBuffer + m_end);
  } else {
    TypeOperations::move(oldBuffer, oldBuffer + m_end, m_buffer.buffer());
    m_buffer.clearUnusedSlots(oldBuffer, oldBuffer + m_end);
    size_t newStart = m_buffer.capacity() - (oldCapacity - m_start);
    TypeOperations::move(oldBuffer + m_start, oldBuffer + oldCapacity,
                         m_buffer.buffer() + newStart);
    m_buffer.clearUnusedSlots(oldBuffer + m_start, oldBuffer + oldCapacity);
    m_start = newStart;
  }
  m_buffer.deallocateBuffer(oldBuffer);
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline T Deque<T, inlineCapacity, Allocator>::takeFirst() {
  T oldFirst = std::move(front());
  pop_front();
  return oldFirst;
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline T Deque<T, inlineCapacity, Allocator>::takeLast() {
  T oldLast = std::move(back());
  pop_back();
  return oldLast;
}

template <typename T, size_t inlineCapacity, typename Allocator>
template <typename U>
inline void Deque<T, inlineCapacity, Allocator>::push_back(U&& value) {
  expandCapacityIfNeeded();
  T* newElement = &m_buffer.buffer()[m_end];
  // Advance m_end with wrap-around before constructing, so iterators formed
  // from the new m_end are consistent.
  if (m_end == m_buffer.capacity() - 1)
    m_end = 0;
  else
    ++m_end;
  new (NotNull, newElement) T(std::forward<U>(value));
}

template <typename T, size_t inlineCapacity, typename Allocator>
template <typename U>
inline void Deque<T, inlineCapacity, Allocator>::push_front(U&& value) {
  expandCapacityIfNeeded();
  if (!m_start)
    m_start = m_buffer.capacity() - 1;
  else
    --m_start;
  new (NotNull, &m_buffer.buffer()[m_start]) T(std::forward<U>(value));
}

template <typename T, size_t inlineCapacity, typename Allocator>
template <typename... Args>
inline void Deque<T, inlineCapacity, Allocator>::emplace_back(Args&&... args) {
  expandCapacityIfNeeded();
  T* newElement = &m_buffer.buffer()[m_end];
  if (m_end == m_buffer.capacity() - 1)
    m_end = 0;
  else
    ++m_end;
  new (NotNull, newElement) T(std::forward<Args>(args)...);
}

template <typename T, size_t inlineCapacity, typename Allocator>
template <typename... Args>
inline void Deque<T, inlineCapacity, Allocator>::emplace_front(Args&&... args) {
  expandCapacityIfNeeded();
  if (!m_start)
    m_start = m_buffer.capacity() - 1;
  else
    --m_start;
  new (NotNull, &m_buffer.buffer()[m_start]) T(std::forward<Args>(args)...);
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::pop_front() {
  DCHECK(!isEmpty());
  TypeOperations::destruct(&m_buffer.buffer()[m_start],
                           &m_buffer.buffer()[m_start + 1]);
  m_buffer.clearUnusedSlots(&m_buffer.buffer()[m_start],
                            &m_buffer.buffer()[m_start + 1]);
  if (m_start == m_buffer.capacity() - 1)
    m_start = 0;
  else
    ++m_start;
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::pop_back() {
  DCHECK(!isEmpty());
  if (!m_end)
    m_end = m_buffer.capacity() - 1;
  else
    --m_end;
  TypeOperations::destruct(&m_buffer.buffer()[m_end],
                           &m_buffer.buffer()[m_end + 1]);
  m_buffer.clearUnusedSlots(&m_buffer.buffer()[m_end],
                            &m_buffer.buffer()[m_end + 1]);
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::erase(iterator& it) {
  erase(it.m_index);
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::erase(const_iterator& it) {
  erase(it.m_index);
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void Deque<T, inlineCapacity, Allocator>::erase(size_t position) {
  if (position == m_end)
    return;

  T* buffer = m_buffer.buffer();
  TypeOperations::destruct(&buffer[position], &buffer[position + 1]);

  // Find which segment of the circular buffer contained the remove element,
  // and only move elements in that part.
  if (position >= m_start) {
    TypeOperations::moveOverlapping(buffer + m_start, buffer + position,
                                    buffer + m_start + 1);
    m_buffer.clearUnusedSlots(buffer + m_start, buffer + m_start + 1);
    m_start = (m_start + 1) % m_buffer.capacity();
  } else {
    TypeOperations::moveOverlapping(buffer + position + 1, buffer + m_end,
                                    buffer + position);
    m_buffer.clearUnusedSlots(buffer + m_end - 1, buffer + m_end);
    m_end = (m_end - 1 + m_buffer.capacity()) % m_buffer.capacity();
  }
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline DequeIteratorBase<T, inlineCapacity, Allocator>::DequeIteratorBase()
    : m_deque(0) {}

template <typename T, size_t inlineCapacity, typename Allocator>
inline DequeIteratorBase<T, inlineCapacity, Allocator>::DequeIteratorBase(
    const Deque<T, inlineCapacity, Allocator>* deque,
    size_t index)
    : m_deque(const_cast<Deque<T, inlineCapacity, Allocator>*>(deque)),
      m_index(index) {}

template <typename T, size_t inlineCapacity, typename Allocator>
inline DequeIteratorBase<T, inlineCapacity, Allocator>::DequeIteratorBase(
    const DequeIteratorBase& other)
    : m_deque(other.m_deque), m_index(other.m_index) {}

template <typename T, size_t inlineCapacity, typename Allocator>
inline DequeIteratorBase<T, inlineCapacity, Allocator>&
DequeIteratorBase<T, inlineCapacity, Allocator>::operator=(
    const DequeIteratorBase<T, 0, Allocator>& other) {
  m_deque = other.m_deque;
  m_index = other.m_index;
  return *this;
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline DequeIteratorBase<T, inlineCapacity, Allocator>::~DequeIteratorBase() {}

// Note: only the index is compared, not m_deque — comparing iterators from
// different deques is meaningless.
template <typename T, size_t inlineCapacity, typename Allocator>
inline bool DequeIteratorBase<T, inlineCapacity, Allocator>::isEqual(
    const DequeIteratorBase& other) const {
  return m_index == other.m_index;
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void DequeIteratorBase<T, inlineCapacity, Allocator>::increment() {
  DCHECK_NE(m_index, m_deque->m_end);
  DCHECK(m_deque->m_buffer.capacity());
  if (m_index == m_deque->m_buffer.capacity() - 1)
    m_index = 0;
  else
    ++m_index;
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void DequeIteratorBase<T, inlineCapacity, Allocator>::decrement() {
  DCHECK_NE(m_index, m_deque->m_start);
  DCHECK(m_deque->m_buffer.capacity());
  if (!m_index)
    m_index = m_deque->m_buffer.capacity() - 1;
  else
    --m_index;
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline T* DequeIteratorBase<T, inlineCapacity, Allocator>::after() const {
  RELEASE_ASSERT(m_index != m_deque->m_end);
  return &m_deque->m_buffer.buffer()[m_index];
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline T* DequeIteratorBase<T, inlineCapacity, Allocator>::before() const {
  RELEASE_ASSERT(m_index != m_deque->m_start);
  if (!m_index)
    return &m_deque->m_buffer.buffer()[m_deque->m_buffer.capacity() - 1];
  return &m_deque->m_buffer.buffer()[m_index - 1];
}

// This is only called if the allocator is a HeapAllocator. It is used when
// visiting during a tracing GC.
template <typename T, size_t inlineCapacity, typename Allocator>
template <typename VisitorDispatcher>
void Deque<T, inlineCapacity, Allocator>::trace(VisitorDispatcher visitor) {
  DCHECK(Allocator::isGarbageCollected) << "Garbage collector must be enabled.";
  const T* bufferBegin = m_buffer.buffer();
  const T* end = bufferBegin + m_end;
  if (IsTraceableInCollectionTrait<VectorTraits<T>>::value) {
    // Trace each live element; two loops when the contents wrap.
    if (m_start <= m_end) {
      for (const T* bufferEntry = bufferBegin + m_start; bufferEntry != end;
           bufferEntry++)
        Allocator::template trace<VisitorDispatcher, T, VectorTraits<T>>(
            visitor, *const_cast<T*>(bufferEntry));
    } else {
      for (const T* bufferEntry = bufferBegin; bufferEntry != end;
           bufferEntry++)
        Allocator::template trace<VisitorDispatcher, T, VectorTraits<T>>(
            visitor, *const_cast<T*>(bufferEntry));
      const T* bufferEnd = m_buffer.buffer() + m_buffer.capacity();
      for (const T* bufferEntry = bufferBegin + m_start;
           bufferEntry != bufferEnd; bufferEntry++)
        Allocator::template trace<VisitorDispatcher, T, VectorTraits<T>>(
            visitor, *const_cast<T*>(bufferEntry));
    }
  }
  if (m_buffer.hasOutOfLineBuffer()) {
    Allocator::markNoTracing(visitor, m_buffer.buffer());
    Allocator::registerBackingStoreReference(visitor, m_buffer.bufferSlot());
  }
}

template <typename T, size_t inlineCapacity, typename Allocator>
inline void swap(Deque<T, inlineCapacity, Allocator>& a,
                 Deque<T, inlineCapacity, Allocator>& b) {
  a.swap(b);
}

}  // namespace WTF

using WTF::Deque;

#endif  // WTF_Deque_h
diff --git a/third_party/WebKit/Source/platform/wtf/DoublyLinkedList.h b/third_party/WebKit/Source/platform/wtf/DoublyLinkedList.h new file mode 100644 index 0000000..e44950885 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/DoublyLinkedList.h
@@ -0,0 +1,197 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DoublyLinkedList_h +#define DoublyLinkedList_h + +#include "platform/wtf/Allocator.h" + +namespace WTF { + +// This class allows nodes to share code without dictating data member layout. 
+template <typename T> +class DoublyLinkedListNode { + public: + DoublyLinkedListNode(); + + void setPrev(T*); + void setNext(T*); + + T* prev() const; + T* next() const; +}; + +template <typename T> +inline DoublyLinkedListNode<T>::DoublyLinkedListNode() { + setPrev(0); + setNext(0); +} + +template <typename T> +inline void DoublyLinkedListNode<T>::setPrev(T* prev) { + static_cast<T*>(this)->m_prev = prev; +} + +template <typename T> +inline void DoublyLinkedListNode<T>::setNext(T* next) { + static_cast<T*>(this)->m_next = next; +} + +template <typename T> +inline T* DoublyLinkedListNode<T>::prev() const { + return static_cast<const T*>(this)->m_prev; +} + +template <typename T> +inline T* DoublyLinkedListNode<T>::next() const { + return static_cast<const T*>(this)->m_next; +} + +template <typename T> +class DoublyLinkedList { + USING_FAST_MALLOC(DoublyLinkedList); + + public: + DoublyLinkedList(); + + bool isEmpty() const; + size_t size() const; // This is O(n). + void clear(); + + T* head() const; + T* removeHead(); + + T* tail() const; + + void push(T*); + void append(T*); + void remove(T*); + + private: + T* m_head; + T* m_tail; +}; + +template <typename T> +inline DoublyLinkedList<T>::DoublyLinkedList() : m_head(0), m_tail(0) {} + +template <typename T> +inline bool DoublyLinkedList<T>::isEmpty() const { + return !m_head; +} + +template <typename T> +inline size_t DoublyLinkedList<T>::size() const { + size_t size = 0; + for (T* node = m_head; node; node = node->next()) + ++size; + return size; +} + +template <typename T> +inline void DoublyLinkedList<T>::clear() { + m_head = 0; + m_tail = 0; +} + +template <typename T> +inline T* DoublyLinkedList<T>::head() const { + return m_head; +} + +template <typename T> +inline T* DoublyLinkedList<T>::tail() const { + return m_tail; +} + +template <typename T> +inline void DoublyLinkedList<T>::push(T* node) { + if (!m_head) { + DCHECK(!m_tail); + m_head = node; + m_tail = node; + node->setPrev(0); + node->setNext(0); + 
return; + } + + DCHECK(m_tail); + m_head->setPrev(node); + node->setNext(m_head); + node->setPrev(0); + m_head = node; +} + +template <typename T> +inline void DoublyLinkedList<T>::append(T* node) { + if (!m_tail) { + DCHECK(!m_head); + m_head = node; + m_tail = node; + node->setPrev(0); + node->setNext(0); + return; + } + + DCHECK(m_head); + m_tail->setNext(node); + node->setPrev(m_tail); + node->setNext(0); + m_tail = node; +} + +template <typename T> +inline void DoublyLinkedList<T>::remove(T* node) { + if (node->prev()) { + DCHECK_NE(node, m_head); + node->prev()->setNext(node->next()); + } else { + DCHECK_EQ(node, m_head); + m_head = node->next(); + } + + if (node->next()) { + DCHECK_NE(node, m_tail); + node->next()->setPrev(node->prev()); + } else { + DCHECK_EQ(node, m_tail); + m_tail = node->prev(); + } +} + +template <typename T> +inline T* DoublyLinkedList<T>::removeHead() { + T* node = head(); + if (node) + remove(node); + return node; +} + +} // namespace WTF + +using WTF::DoublyLinkedListNode; +using WTF::DoublyLinkedList; + +#endif
diff --git a/third_party/WebKit/Source/platform/wtf/FilePrintStream.h b/third_party/WebKit/Source/platform/wtf/FilePrintStream.h new file mode 100644 index 0000000..41c7df1a --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/FilePrintStream.h
/*
 * Copyright (C) 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef FilePrintStream_h
#define FilePrintStream_h

#include "platform/wtf/Compiler.h"
#include "platform/wtf/PrintStream.h"
#include <memory>
#include <stdio.h>

namespace WTF {

// A PrintStream backed by a stdio FILE*.
class WTF_EXPORT FilePrintStream final : public PrintStream {
 public:
  // Adopt: the stream takes ownership of the FILE* (presumably closing it on
  // destruction -- behavior lives in FilePrintStream.cpp, confirm there).
  // Borrow: the caller keeps ownership of the FILE*.
  enum AdoptionMode { Adopt, Borrow };

  FilePrintStream(FILE*, AdoptionMode = Adopt);
  ~FilePrintStream() override;

  // Opens |filename| with an fopen()-style |mode| and wraps the result.
  // NOTE(review): the failure behavior (likely a null return) is defined in
  // the .cpp -- confirm before relying on it.
  static std::unique_ptr<FilePrintStream> open(const char* filename,
                                               const char* mode);

  // Direct access to the underlying stdio stream; ownership is unchanged.
  FILE* file() { return m_file; }

  PRINTF_FORMAT(2, 0) void vprintf(const char* format, va_list) override;
  void flush() override;

 private:
  FILE* m_file;
  AdoptionMode m_adoptionMode;  // Whether m_file is owned (Adopt) or borrowed.
};

}  // namespace WTF

using WTF::FilePrintStream;

#endif  // FilePrintStream_h
diff --git a/third_party/WebKit/Source/platform/wtf/HashCountedSet.h b/third_party/WebKit/Source/platform/wtf/HashCountedSet.h new file mode 100644 index 0000000..8f26e5c5 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/HashCountedSet.h
/*
 * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#ifndef WTF_HashCountedSet_h
#define WTF_HashCountedSet_h

#include "platform/wtf/Assertions.h"
#include "platform/wtf/HashMap.h"
#include "platform/wtf/Vector.h"
#include "platform/wtf/allocator/PartitionAllocator.h"

namespace WTF {

// An unordered hash set that keeps track of how many times you added an item to
// the set. The iterators have fields ->key and ->value that return the set
// members and their counts, respectively.
//
// Implemented as a thin wrapper around HashMap<Value, unsigned>, where the
// mapped unsigned is the occurrence count.
template <typename Value,
          typename HashFunctions = typename DefaultHash<Value>::Hash,
          typename Traits = HashTraits<Value>,
          typename Allocator = PartitionAllocator>
class HashCountedSet {
  USE_ALLOCATOR(HashCountedSet, Allocator);
  WTF_MAKE_NONCOPYABLE(HashCountedSet);

 private:
  typedef HashMap<Value,
                  unsigned,
                  HashFunctions,
                  Traits,
                  HashTraits<unsigned>,
                  Allocator>
      ImplType;

 public:
  typedef Value ValueType;
  using value_type = ValueType;
  typedef typename ImplType::iterator iterator;
  typedef typename ImplType::const_iterator const_iterator;
  typedef typename ImplType::AddResult AddResult;

  HashCountedSet() {
    static_assert(Allocator::isGarbageCollected ||
                      !IsPointerToGarbageCollectedType<Value>::value,
                  "Cannot put raw pointers to garbage-collected classes into "
                  "an off-heap HashCountedSet. Use "
                  "HeapHashCountedSet<Member<T>> instead.");
  }

  void swap(HashCountedSet& other) { m_impl.swap(other.m_impl); }

  // size() counts distinct values, not total additions.
  unsigned size() const { return m_impl.size(); }
  unsigned capacity() const { return m_impl.capacity(); }
  bool isEmpty() const { return m_impl.isEmpty(); }

  // Iterators iterate over pairs of values (called key) and counts (called
  // value).
  iterator begin() { return m_impl.begin(); }
  iterator end() { return m_impl.end(); }
  const_iterator begin() const { return m_impl.begin(); }
  const_iterator end() const { return m_impl.end(); }

  iterator find(const ValueType& value) { return m_impl.find(value); }
  const_iterator find(const ValueType& value) const {
    return m_impl.find(value);
  }
  bool contains(const ValueType& value) const { return m_impl.contains(value); }
  // Returns the count for |value| -- NOTE(review): for an absent value this
  // forwards to HashMap::at(); confirm in HashMap.h that it yields 0.
  unsigned count(const ValueType& value) const { return m_impl.at(value); }

  // Increases the count if an equal value is already present. The return value
  // is a pair of an iterator to the new value's location, and a bool that is
  // true if a new entry was added.
  AddResult add(const ValueType&);

  // Generalized add(), adding the value N times.
  AddResult add(const ValueType&, unsigned);

  // Reduces the count of the value, and removes it if count goes down to
  // zero, returns true if the value is removed.
  bool remove(const ValueType& value) { return remove(find(value)); }
  bool remove(iterator);

  // Removes the value, regardless of its count.
  void removeAll(const ValueType& value) { removeAll(find(value)); }
  void removeAll(iterator);

  // Clears the whole set.
  void clear() { m_impl.clear(); }

  // Copies the distinct values (without their counts) into a Vector.
  Vector<Value> asVector() const;

  template <typename VisitorDispatcher>
  void trace(VisitorDispatcher visitor) {
    m_impl.trace(visitor);
  }

 private:
  ImplType m_impl;
};

template <typename T, typename U, typename V, typename W>
inline typename HashCountedSet<T, U, V, W>::AddResult
HashCountedSet<T, U, V, W>::add(const ValueType& value, unsigned count) {
  DCHECK_GT(count, 0u);
  // Insert with count 0 if absent, then bump by |count|. AddResult still
  // reports whether the entry was newly inserted.
  AddResult result = m_impl.insert(value, 0);
  result.storedValue->value += count;
  return result;
}

template <typename T, typename U, typename V, typename W>
inline typename HashCountedSet<T, U, V, W>::AddResult
HashCountedSet<T, U, V, W>::add(const ValueType& value) {
  return add(value, 1u);
}

template <typename T, typename U, typename V, typename W>
inline bool HashCountedSet<T, U, V, W>::remove(iterator it) {
  if (it == end())
    return false;

  unsigned oldVal = it->value;
  DCHECK(oldVal);  // A present entry must have a positive count.
  unsigned newVal = oldVal - 1;
  if (newVal) {
    // Still referenced at least once: just decrement the count in place.
    it->value = newVal;
    return false;
  }

  m_impl.erase(it);
  return true;
}

template <typename T, typename U, typename V, typename W>
inline void HashCountedSet<T, U, V, W>::removeAll(iterator it) {
  if (it == end())
    return;

  m_impl.erase(it);
}

// Copies the distinct values of |collection| into |vector|, replacing its
// previous contents.
template <typename Value,
          typename HashFunctions,
          typename Traits,
          typename Allocator,
          typename VectorType>
inline void copyToVector(
    const HashCountedSet<Value, HashFunctions, Traits, Allocator>& collection,
    VectorType& vector) {
  {
    // Disallow GC across resize allocation, see crbug.com/568173
    typename VectorType::GCForbiddenScope scope;
    vector.resize(collection.size());
  }

  auto it = collection.begin();
  auto end = collection.end();
  for (unsigned i = 0; it != end; ++it, ++i)
    vector[i] = (*it).key;
}

template <typename T, typename U, typename V, typename W>
inline Vector<T> HashCountedSet<T, U, V, W>::asVector() const {
  Vector<T> vector;
  copyToVector(*this, vector);
  return vector;
}

}  // namespace WTF

using WTF::HashCountedSet;

#endif  // WTF_HashCountedSet_h
diff --git a/third_party/WebKit/Source/platform/wtf/HashFunctions.h b/third_party/WebKit/Source/platform/wtf/HashFunctions.h new file mode 100644 index 0000000..a581302 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/HashFunctions.h
/*
 * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Library General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Library General Public License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this library; see the file COPYING.LIB. If not, write to
 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
 * Boston, MA 02110-1301, USA.
 *
 */

#ifndef WTF_HashFunctions_h
#define WTF_HashFunctions_h

#include "platform/wtf/RefPtr.h"
#include "platform/wtf/StdLibExtras.h"
#include <memory>
#include <stdint.h>
#include <type_traits>

namespace WTF {

// Maps a byte width (1/2/4/8) to the fixed-width integer types of that size.
template <size_t size>
struct IntTypes;
template <>
struct IntTypes<1> {
  typedef int8_t SignedType;
  typedef uint8_t UnsignedType;
};
template <>
struct IntTypes<2> {
  typedef int16_t SignedType;
  typedef uint16_t UnsignedType;
};
template <>
struct IntTypes<4> {
  typedef int32_t SignedType;
  typedef uint32_t UnsignedType;
};
template <>
struct IntTypes<8> {
  typedef int64_t SignedType;
  typedef uint64_t UnsignedType;
};

// integer hash function

// Thomas Wang's 32 Bit Mix Function:
// http://www.cris.com/~Ttwang/tech/inthash.htm
inline unsigned hashInt(uint8_t key8) {
  unsigned key = key8;
  key += ~(key << 15);
  key ^= (key >> 10);
  key += (key << 3);
  key ^= (key >> 6);
  key += ~(key << 11);
  key ^= (key >> 16);
  return key;
}

// Thomas Wang's 32 Bit Mix Function:
// http://www.cris.com/~Ttwang/tech/inthash.htm
inline unsigned hashInt(uint16_t key16) {
  unsigned key = key16;
  key += ~(key << 15);
  key ^= (key >> 10);
  key += (key << 3);
  key ^= (key >> 6);
  key += ~(key << 11);
  key ^= (key >> 16);
  return key;
}

// Thomas Wang's 32 Bit Mix Function:
// http://www.cris.com/~Ttwang/tech/inthash.htm
inline unsigned hashInt(uint32_t key) {
  key += ~(key << 15);
  key ^= (key >> 10);
  key += (key << 3);
  key ^= (key >> 6);
  key += ~(key << 11);
  key ^= (key >> 16);
  return key;
}

// Thomas Wang's 64 bit Mix Function:
// http://www.cris.com/~Ttwang/tech/inthash.htm
// Mixes all 64 input bits, then truncates to the unsigned return type.
inline unsigned hashInt(uint64_t key) {
  key += ~(key << 32);
  key ^= (key >> 22);
  key += ~(key << 13);
  key ^= (key >> 8);
  key += (key << 3);
  key ^= (key >> 15);
  key += ~(key << 27);
  key ^= (key >> 31);
  return static_cast<unsigned>(key);
}

// Compound integer hash method:
// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
// Combines two 32-bit values via multiplicative hashing, returning the high
// 32 bits of the 64-bit product.
inline unsigned hashInts(unsigned key1, unsigned key2) {
  unsigned shortRandom1 = 277951225;          // A random 32-bit value.
  unsigned shortRandom2 = 95187966;           // A random 32-bit value.
  uint64_t longRandom = 19248658165952623LL;  // A random, odd 64-bit value.

  uint64_t product =
      longRandom * shortRandom1 * key1 + longRandom * shortRandom2 * key2;
  unsigned highBits = static_cast<unsigned>(
      product >> (8 * (sizeof(uint64_t) - sizeof(unsigned))));
  return highBits;
}

// Hash for any integral type: converts to the unsigned type of the same
// width, then applies the matching hashInt() overload.
template <typename T>
struct IntHash {
  static unsigned hash(T key) {
    return hashInt(
        static_cast<typename IntTypes<sizeof(T)>::UnsignedType>(key));
  }
  static bool equal(T a, T b) { return a == b; }
  // Whether hash()/equal() may be used with the hash table's empty/deleted
  // sentinel values -- consumed by HashTable (see HashTable.h).
  static const bool safeToCompareToEmptyOrDeleted = true;
};

// Hash for float/double. Hashes and compares the raw bit pattern, so 0.0 and
// -0.0 compare unequal here, and two NaNs with identical bit patterns compare
// equal.
template <typename T>
struct FloatHash {
  typedef typename IntTypes<sizeof(T)>::UnsignedType Bits;
  static unsigned hash(T key) { return hashInt(bitwiseCast<Bits>(key)); }
  static bool equal(T a, T b) {
    return bitwiseCast<Bits>(a) == bitwiseCast<Bits>(b);
  }
  static const bool safeToCompareToEmptyOrDeleted = true;
};

// pointer identity hash function

// Hashes the pointer value (address) itself, not the pointee.
template <typename T>
struct PtrHash {
  static unsigned hash(T* key) {
#if COMPILER(MSVC)
#pragma warning(push)
// work around what seems to be a bug in MSVC's conversion warnings
#pragma warning(disable : 4244)
#endif
    return IntHash<uintptr_t>::hash(reinterpret_cast<uintptr_t>(key));
#if COMPILER(MSVC)
#pragma warning(pop)
#endif
  }
  static bool equal(T* a, T* b) { return a == b; }
  // nullptr overloads let callers compare against nullptr without a cast.
  static bool equal(std::nullptr_t, T* b) { return !b; }
  static bool equal(T* a, std::nullptr_t) { return !a; }
  static const bool safeToCompareToEmptyOrDeleted = true;
};

// Hash for RefPtr<T>: hashes by the raw pointer, so a RefPtr and the T* it
// holds hash and compare identically.
template <typename T>
struct RefPtrHash : PtrHash<T> {
  using PtrHash<T>::hash;
  static unsigned hash(const RefPtr<T>& key) { return hash(key.get()); }
  static unsigned hash(const PassRefPtr<T>& key) { return hash(key.get()); }
  using PtrHash<T>::equal;
  static bool equal(const RefPtr<T>& a, const RefPtr<T>& b) { return a == b; }
  static bool equal(T* a, const RefPtr<T>& b) { return a == b; }
  static bool equal(const RefPtr<T>& a, T* b) { return a == b; }
  static bool equal(const RefPtr<T>& a, const PassRefPtr<T>& b) {
    return a == b;
  }
};

// Hash for std::unique_ptr<T>: hashes by the raw pointer, mixed lookups with
// T* supported.
template <typename T>
struct UniquePtrHash : PtrHash<T> {
  using PtrHash<T>::hash;
  static unsigned hash(const std::unique_ptr<T>& key) {
    return hash(key.get());
  }
  static bool equal(const std::unique_ptr<T>& a, const std::unique_ptr<T>& b) {
    return a == b;
  }
  static bool equal(const std::unique_ptr<T>& a, const T* b) {
    return a.get() == b;
  }
  static bool equal(const T* a, const std::unique_ptr<T>& b) {
    return a == b.get();
  }
};

// Default hash function for each type.
template <typename T>
struct DefaultHash;

// Actual implementation of DefaultHash.
//
// The case of |isIntegral| == false is not implemented. If you see a compile
// error saying DefaultHashImpl<T, false> is not defined, that's because the
// default hash functions for T are not defined. You need to implement them
// yourself.
template <typename T, bool isIntegral>
struct DefaultHashImpl;

template <typename T>
struct DefaultHashImpl<T, true> {
  using Hash = IntHash<typename std::make_unsigned<T>::type>;
};

// Canonical implementation of DefaultHash.
template <typename T>
struct DefaultHash : DefaultHashImpl<T, std::is_integral<T>::value> {};

// Specializations of DefaultHash follow.
template <>
struct DefaultHash<float> {
  using Hash = FloatHash<float>;
};
template <>
struct DefaultHash<double> {
  using Hash = FloatHash<double>;
};

// Specializations for pointer types.
template <typename T>
struct DefaultHash<T*> {
  using Hash = PtrHash<T>;
};
template <typename T>
struct DefaultHash<RefPtr<T>> {
  using Hash = RefPtrHash<T>;
};
template <typename T>
struct DefaultHash<std::unique_ptr<T>> {
  using Hash = UniquePtrHash<T>;
};

// Specializations for pairs.

// Generic case (T or U is non-integral): hash each member with its
// DefaultHash, then combine with hashInts().
template <typename T, typename U, bool areBothIntegral>
struct PairHashImpl {
  static unsigned hash(const std::pair<T, U>& p) {
    return hashInts(DefaultHash<T>::Hash::hash(p.first),
                    DefaultHash<U>::Hash::hash(p.second));
  }
  static bool equal(const std::pair<T, U>& a, const std::pair<T, U>& b) {
    return DefaultHash<T>::Hash::equal(a.first, b.first) &&
           DefaultHash<U>::Hash::equal(a.second, b.second);
  }
  static const bool safeToCompareToEmptyOrDeleted =
      DefaultHash<T>::Hash::safeToCompareToEmptyOrDeleted &&
      DefaultHash<U>::Hash::safeToCompareToEmptyOrDeleted;
};

// Special version for pairs of integrals: skips the per-member mixing and
// feeds the values straight into hashInts(). Note the implicit conversion to
// unsigned means only the low 32 bits of wider integral types contribute.
template <typename T, typename U>
struct PairHashImpl<T, U, true> {
  static unsigned hash(const std::pair<T, U>& p) {
    return hashInts(p.first, p.second);
  }
  static bool equal(const std::pair<T, U>& a, const std::pair<T, U>& b) {
    return PairHashImpl<T, U, false>::equal(
        a, b);  // Refer to the generic version.
  }
  static const bool safeToCompareToEmptyOrDeleted =
      PairHashImpl<T, U, false>::safeToCompareToEmptyOrDeleted;
};

// Combined version: selects the integral fast path when both members are
// integral types.
template <typename T, typename U>
struct PairHash
    : PairHashImpl<T,
                   U,
                   std::is_integral<T>::value && std::is_integral<U>::value> {};

template <typename T, typename U>
struct DefaultHash<std::pair<T, U>> {
  using Hash = PairHash<T, U>;
};

}  // namespace WTF

using WTF::DefaultHash;
using WTF::IntHash;
using WTF::PtrHash;

#endif  // WTF_HashFunctions_h
diff --git a/third_party/WebKit/Source/platform/wtf/HashIterators.h b/third_party/WebKit/Source/platform/wtf/HashIterators.h new file mode 100644 index 0000000..a012ee7 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/HashIterators.h
/*
 * Copyright (C) 2007 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef WTF_HashIterators_h
#define WTF_HashIterators_h

#include "platform/wtf/Allocator.h"

namespace WTF {

template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableConstKeysIterator;
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableConstValuesIterator;
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableKeysIterator;
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableValuesIterator;

// Specialization of HashTableConstIteratorAdapter (primary template declared
// elsewhere, alongside HashTable) for map-like tables whose entries are
// KeyValuePair<K, M>. Exposes ->key / ->value, plus keys() and values()
// projection iterators.
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableConstIteratorAdapter<HashTableType,
                                     KeyValuePair<KeyType, MappedType>> {
  STACK_ALLOCATED();

 private:
  typedef KeyValuePair<KeyType, MappedType> ValueType;

 public:
  typedef HashTableConstKeysIterator<HashTableType, KeyType, MappedType>
      KeysIterator;
  typedef HashTableConstValuesIterator<HashTableType, KeyType, MappedType>
      ValuesIterator;

  HashTableConstIteratorAdapter() {}
  HashTableConstIteratorAdapter(
      const typename HashTableType::const_iterator& impl)
      : m_impl(impl) {}

  // Casts the table's stored entry to the public KeyValuePair view.
  // NOTE(review): relies on layout compatibility between the table's storage
  // type and KeyValuePair -- see the HashTable implementation.
  const ValueType* get() const { return (const ValueType*)m_impl.get(); }
  const ValueType& operator*() const { return *get(); }
  const ValueType* operator->() const { return get(); }

  HashTableConstIteratorAdapter& operator++() {
    ++m_impl;
    return *this;
  }
  // postfix ++ intentionally omitted

  KeysIterator keys() { return KeysIterator(*this); }
  ValuesIterator values() { return ValuesIterator(*this); }

  typename HashTableType::const_iterator m_impl;
};

// Mutable counterpart of the adapter above; also converts implicitly to the
// const adapter.
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableIteratorAdapter<HashTableType,
                                KeyValuePair<KeyType, MappedType>> {
  STACK_ALLOCATED();

 private:
  typedef KeyValuePair<KeyType, MappedType> ValueType;

 public:
  typedef HashTableKeysIterator<HashTableType, KeyType, MappedType>
      KeysIterator;
  typedef HashTableValuesIterator<HashTableType, KeyType, MappedType>
      ValuesIterator;

  HashTableIteratorAdapter() {}
  HashTableIteratorAdapter(const typename HashTableType::iterator& impl)
      : m_impl(impl) {}

  ValueType* get() const { return (ValueType*)m_impl.get(); }
  ValueType& operator*() const { return *get(); }
  ValueType* operator->() const { return get(); }

  HashTableIteratorAdapter& operator++() {
    ++m_impl;
    return *this;
  }
  // postfix ++ intentionally omitted

  operator HashTableConstIteratorAdapter<HashTableType, ValueType>() {
    typename HashTableType::const_iterator i = m_impl;
    return i;
  }

  KeysIterator keys() { return KeysIterator(*this); }
  ValuesIterator values() { return ValuesIterator(*this); }

  typename HashTableType::iterator m_impl;
};

// Const iterator over only the keys of a map-like hash table.
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableConstKeysIterator {
  STACK_ALLOCATED();

 private:
  typedef HashTableConstIteratorAdapter<HashTableType,
                                        KeyValuePair<KeyType, MappedType>>
      ConstIterator;

 public:
  HashTableConstKeysIterator(const ConstIterator& impl) : m_impl(impl) {}

  const KeyType* get() const { return &(m_impl.get()->key); }
  const KeyType& operator*() const { return *get(); }
  const KeyType* operator->() const { return get(); }

  HashTableConstKeysIterator& operator++() {
    ++m_impl;
    return *this;
  }
  // postfix ++ intentionally omitted

  ConstIterator m_impl;
};

// Const iterator over only the mapped values of a map-like hash table.
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableConstValuesIterator {
  STACK_ALLOCATED();

 private:
  typedef HashTableConstIteratorAdapter<HashTableType,
                                        KeyValuePair<KeyType, MappedType>>
      ConstIterator;

 public:
  HashTableConstValuesIterator(const ConstIterator& impl) : m_impl(impl) {}

  const MappedType* get() const { return &(m_impl.get()->value); }
  const MappedType& operator*() const { return *get(); }
  const MappedType* operator->() const { return get(); }

  HashTableConstValuesIterator& operator++() {
    ++m_impl;
    return *this;
  }
  // postfix ++ intentionally omitted

  ConstIterator m_impl;
};

// Mutable iterator over only the keys; converts implicitly to the const form.
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableKeysIterator {
  STACK_ALLOCATED();

 private:
  typedef HashTableIteratorAdapter<HashTableType,
                                   KeyValuePair<KeyType, MappedType>>
      Iterator;
  typedef HashTableConstIteratorAdapter<HashTableType,
                                        KeyValuePair<KeyType, MappedType>>
      ConstIterator;

 public:
  HashTableKeysIterator(const Iterator& impl) : m_impl(impl) {}

  KeyType* get() const { return &(m_impl.get()->key); }
  KeyType& operator*() const { return *get(); }
  KeyType* operator->() const { return get(); }

  HashTableKeysIterator& operator++() {
    ++m_impl;
    return *this;
  }
  // postfix ++ intentionally omitted

  operator HashTableConstKeysIterator<HashTableType, KeyType, MappedType>() {
    ConstIterator i = m_impl;
    return i;
  }

  Iterator m_impl;
};

// Mutable iterator over only the mapped values; converts implicitly to the
// const form.
template <typename HashTableType, typename KeyType, typename MappedType>
struct HashTableValuesIterator {
  STACK_ALLOCATED();

 private:
  typedef HashTableIteratorAdapter<HashTableType,
                                   KeyValuePair<KeyType, MappedType>>
      Iterator;
  typedef HashTableConstIteratorAdapter<HashTableType,
                                        KeyValuePair<KeyType, MappedType>>
      ConstIterator;

 public:
  HashTableValuesIterator(const Iterator& impl) : m_impl(impl) {}

  MappedType* get() const { return &(m_impl.get()->value); }
  MappedType& operator*() const { return *get(); }
  MappedType* operator->() const { return get(); }

  HashTableValuesIterator& operator++() {
    ++m_impl;
    return *this;
  }
  // postfix ++ intentionally omitted

  operator HashTableConstValuesIterator<HashTableType, KeyType, MappedType>() {
    ConstIterator i = m_impl;
    return i;
  }

  Iterator m_impl;
};

// All comparisons below delegate to the wrapped table iterators.
template <typename T, typename U, typename V>
inline bool operator==(const HashTableConstKeysIterator<T, U, V>& a,
                       const HashTableConstKeysIterator<T, U, V>& b) {
  return a.m_impl == b.m_impl;
}

template <typename T, typename U, typename V>
inline bool operator!=(const HashTableConstKeysIterator<T, U, V>& a,
                       const HashTableConstKeysIterator<T, U, V>& b) {
  return a.m_impl != b.m_impl;
}

template <typename T, typename U, typename V>
inline bool operator==(const HashTableConstValuesIterator<T, U, V>& a,
                       const HashTableConstValuesIterator<T, U, V>& b) {
  return a.m_impl == b.m_impl;
}

template <typename T, typename U, typename V>
inline bool operator!=(const HashTableConstValuesIterator<T, U, V>& a,
                       const HashTableConstValuesIterator<T, U, V>& b) {
  return a.m_impl != b.m_impl;
}

template <typename T, typename U, typename V>
inline bool operator==(const HashTableKeysIterator<T, U, V>& a,
                       const HashTableKeysIterator<T, U, V>& b) {
  return a.m_impl == b.m_impl;
}

template <typename T, typename U, typename V>
inline bool operator!=(const HashTableKeysIterator<T, U, V>& a,
                       const HashTableKeysIterator<T, U, V>& b) {
  return a.m_impl != b.m_impl;
}

template <typename T, typename U, typename V>
inline bool operator==(const HashTableValuesIterator<T, U, V>& a,
                       const HashTableValuesIterator<T, U, V>& b) {
  return a.m_impl == b.m_impl;
}

template <typename T, typename U, typename V>
inline bool operator!=(const HashTableValuesIterator<T, U, V>& a,
                       const HashTableValuesIterator<T, U, V>& b) {
  return a.m_impl != b.m_impl;
}

}  // namespace WTF

#endif  // WTF_HashIterators_h
diff --git a/third_party/WebKit/Source/platform/wtf/HashMap.h b/third_party/WebKit/Source/platform/wtf/HashMap.h new file mode 100644 index 0000000..26b87a4 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/HashMap.h
@@ -0,0 +1,746 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008, 2011 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_HashMap_h +#define WTF_HashMap_h + +#include "platform/wtf/HashTable.h" +#include "platform/wtf/allocator/PartitionAllocator.h" +#include <initializer_list> + +namespace WTF { + +template <typename KeyTraits, typename MappedTraits> +struct HashMapValueTraits; + +struct KeyValuePairKeyExtractor { + STATIC_ONLY(KeyValuePairKeyExtractor); + template <typename T> + static const typename T::KeyType& extract(const T& p) { + return p.key; + } +}; + +// Note: empty or deleted key values are not allowed, using them may lead to +// undefined behavior. For pointer keys this means that null pointers are not +// allowed unless you supply custom key traits. 
+template <typename KeyArg, + typename MappedArg, + typename HashArg = typename DefaultHash<KeyArg>::Hash, + typename KeyTraitsArg = HashTraits<KeyArg>, + typename MappedTraitsArg = HashTraits<MappedArg>, + typename Allocator = PartitionAllocator> +class HashMap { + USE_ALLOCATOR(HashMap, Allocator); + + private: + typedef KeyTraitsArg KeyTraits; + typedef MappedTraitsArg MappedTraits; + typedef HashMapValueTraits<KeyTraits, MappedTraits> ValueTraits; + + public: + typedef typename KeyTraits::TraitType KeyType; + typedef const typename KeyTraits::PeekInType& KeyPeekInType; + typedef typename MappedTraits::TraitType MappedType; + typedef typename ValueTraits::TraitType ValueType; + using value_type = ValueType; + + private: + typedef typename MappedTraits::PeekOutType MappedPeekType; + + typedef HashArg HashFunctions; + + typedef HashTable<KeyType, + ValueType, + KeyValuePairKeyExtractor, + HashFunctions, + ValueTraits, + KeyTraits, + Allocator> + HashTableType; + + class HashMapKeysProxy; + class HashMapValuesProxy; + + public: + HashMap() { + static_assert(Allocator::isGarbageCollected || + !IsPointerToGarbageCollectedType<KeyArg>::value, + "Cannot put raw pointers to garbage-collected classes into " + "an off-heap HashMap. Use HeapHashMap<> instead."); + static_assert(Allocator::isGarbageCollected || + !IsPointerToGarbageCollectedType<MappedArg>::value, + "Cannot put raw pointers to garbage-collected classes into " + "an off-heap HashMap. Use HeapHashMap<> instead."); + } + HashMap(const HashMap&) = default; + HashMap& operator=(const HashMap&) = default; + HashMap(HashMap&&) = default; + HashMap& operator=(HashMap&&) = default; + + // For example, HashMap<int, int>({{1, 11}, {2, 22}, {3, 33}}) will give you + // a HashMap containing a mapping {1 -> 11, 2 -> 22, 3 -> 33}. 
+ HashMap(std::initializer_list<ValueType> elements); + HashMap& operator=(std::initializer_list<ValueType> elements); + + typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator; + typedef HashTableConstIteratorAdapter<HashTableType, ValueType> + const_iterator; + typedef typename HashTableType::AddResult AddResult; + + void swap(HashMap& ref) { m_impl.swap(ref.m_impl); } + + unsigned size() const; + unsigned capacity() const; + void reserveCapacityForSize(unsigned size) { + m_impl.reserveCapacityForSize(size); + } + + bool isEmpty() const; + + // iterators iterate over pairs of keys and values + iterator begin(); + iterator end(); + const_iterator begin() const; + const_iterator end() const; + + HashMapKeysProxy& keys() { return static_cast<HashMapKeysProxy&>(*this); } + const HashMapKeysProxy& keys() const { + return static_cast<const HashMapKeysProxy&>(*this); + } + + HashMapValuesProxy& values() { + return static_cast<HashMapValuesProxy&>(*this); + } + const HashMapValuesProxy& values() const { + return static_cast<const HashMapValuesProxy&>(*this); + } + + iterator find(KeyPeekInType); + const_iterator find(KeyPeekInType) const; + bool contains(KeyPeekInType) const; + MappedPeekType at(KeyPeekInType) const; + + // replaces value but not key if key is already present return value is a + // pair of the iterator to the key location, and a boolean that's true if a + // new value was actually added + template <typename IncomingKeyType, typename IncomingMappedType> + AddResult set(IncomingKeyType&&, IncomingMappedType&&); + + // does nothing if key is already present return value is a pair of the + // iterator to the key location, and a boolean that's true if a new value + // was actually added + template <typename IncomingKeyType, typename IncomingMappedType> + AddResult insert(IncomingKeyType&&, IncomingMappedType&&); + + void erase(KeyPeekInType); + void erase(iterator); + void clear(); + template <typename Collection> + void removeAll(const 
Collection& toBeRemoved) { + WTF::removeAll(*this, toBeRemoved); + } + + MappedType take(KeyPeekInType); // efficient combination of get with remove + + // An alternate version of find() that finds the object by hashing and + // comparing with some other type, to avoid the cost of type + // conversion. HashTranslator must have the following function members: + // static unsigned hash(const T&); + // static bool equal(const ValueType&, const T&); + template <typename HashTranslator, typename T> + iterator find(const T&); + template <typename HashTranslator, typename T> + const_iterator find(const T&) const; + template <typename HashTranslator, typename T> + bool contains(const T&) const; + + // An alternate version of insert() that finds the object by hashing and + // comparing with some other type, to avoid the cost of type conversion if + // the object is already in the table. HashTranslator must have the + // following function members: + // static unsigned hash(const T&); + // static bool equal(const ValueType&, const T&); + // static translate(ValueType&, const T&, unsigned hashCode); + template <typename HashTranslator, + typename IncomingKeyType, + typename IncomingMappedType> + AddResult insert(IncomingKeyType&&, IncomingMappedType&&); + + static bool isValidKey(KeyPeekInType); + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher visitor) { + m_impl.trace(visitor); + } + + private: + template <typename IncomingKeyType, typename IncomingMappedType> + AddResult inlineAdd(IncomingKeyType&&, IncomingMappedType&&); + + HashTableType m_impl; +}; + +template <typename KeyArg, + typename MappedArg, + typename HashArg, + typename KeyTraitsArg, + typename MappedTraitsArg, + typename Allocator> +class HashMap<KeyArg, + MappedArg, + HashArg, + KeyTraitsArg, + MappedTraitsArg, + Allocator>::HashMapKeysProxy : private HashMap<KeyArg, + MappedArg, + HashArg, + KeyTraitsArg, + MappedTraitsArg, + Allocator> { + DISALLOW_NEW(); + + public: + typedef 
HashMap<KeyArg, + MappedArg, + HashArg, + KeyTraitsArg, + MappedTraitsArg, + Allocator> + HashMapType; + typedef typename HashMapType::iterator::KeysIterator iterator; + typedef typename HashMapType::const_iterator::KeysIterator const_iterator; + + iterator begin() { return HashMapType::begin().keys(); } + + iterator end() { return HashMapType::end().keys(); } + + const_iterator begin() const { return HashMapType::begin().keys(); } + + const_iterator end() const { return HashMapType::end().keys(); } + + private: + friend class HashMap; + + // These are intentionally not implemented. + HashMapKeysProxy(); + HashMapKeysProxy(const HashMapKeysProxy&); + HashMapKeysProxy& operator=(const HashMapKeysProxy&); + ~HashMapKeysProxy(); +}; + +template <typename KeyArg, + typename MappedArg, + typename HashArg, + typename KeyTraitsArg, + typename MappedTraitsArg, + typename Allocator> +class HashMap<KeyArg, + MappedArg, + HashArg, + KeyTraitsArg, + MappedTraitsArg, + Allocator>::HashMapValuesProxy : private HashMap<KeyArg, + MappedArg, + HashArg, + KeyTraitsArg, + MappedTraitsArg, + Allocator> { + DISALLOW_NEW(); + + public: + typedef HashMap<KeyArg, + MappedArg, + HashArg, + KeyTraitsArg, + MappedTraitsArg, + Allocator> + HashMapType; + typedef typename HashMapType::iterator::ValuesIterator iterator; + typedef typename HashMapType::const_iterator::ValuesIterator const_iterator; + + iterator begin() { return HashMapType::begin().values(); } + + iterator end() { return HashMapType::end().values(); } + + const_iterator begin() const { return HashMapType::begin().values(); } + + const_iterator end() const { return HashMapType::end().values(); } + + private: + friend class HashMap; + + // These are intentionally not implemented. 
+ HashMapValuesProxy(); + HashMapValuesProxy(const HashMapValuesProxy&); + HashMapValuesProxy& operator=(const HashMapValuesProxy&); + ~HashMapValuesProxy(); +}; + +template <typename KeyTraits, typename MappedTraits> +struct HashMapValueTraits : KeyValuePairHashTraits<KeyTraits, MappedTraits> { + STATIC_ONLY(HashMapValueTraits); + static const bool hasIsEmptyValueFunction = true; + static bool isEmptyValue( + const typename KeyValuePairHashTraits<KeyTraits, MappedTraits>::TraitType& + value) { + return isHashTraitsEmptyValue<KeyTraits>(value.key); + } +}; + +template <typename ValueTraits, typename HashFunctions> +struct HashMapTranslator { + STATIC_ONLY(HashMapTranslator); + template <typename T> + static unsigned hash(const T& key) { + return HashFunctions::hash(key); + } + template <typename T, typename U> + static bool equal(const T& a, const U& b) { + return HashFunctions::equal(a, b); + } + template <typename T, typename U, typename V> + static void translate(T& location, U&& key, V&& mapped) { + location.key = std::forward<U>(key); + ValueTraits::ValueTraits::store(std::forward<V>(mapped), location.value); + } +}; + +template <typename ValueTraits, typename Translator> +struct HashMapTranslatorAdapter { + STATIC_ONLY(HashMapTranslatorAdapter); + template <typename T> + static unsigned hash(const T& key) { + return Translator::hash(key); + } + template <typename T, typename U> + static bool equal(const T& a, const U& b) { + return Translator::equal(a, b); + } + template <typename T, typename U, typename V> + static void translate(T& location, U&& key, V&& mapped, unsigned hashCode) { + Translator::translate(location.key, std::forward<U>(key), hashCode); + ValueTraits::ValueTraits::store(std::forward<V>(mapped), location.value); + } +}; + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +HashMap<T, U, V, W, X, Y>::HashMap(std::initializer_list<ValueType> elements) { + if (elements.size()) + 
m_impl.reserveCapacityForSize(elements.size()); + for (const ValueType& element : elements) + insert(element.key, element.value); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +auto HashMap<T, U, V, W, X, Y>::operator=( + std::initializer_list<ValueType> elements) -> HashMap& { + *this = HashMap(std::move(elements)); + return *this; +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline unsigned HashMap<T, U, V, W, X, Y>::size() const { + return m_impl.size(); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline unsigned HashMap<T, U, V, W, X, Y>::capacity() const { + return m_impl.capacity(); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline bool HashMap<T, U, V, W, X, Y>::isEmpty() const { + return m_impl.isEmpty(); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline typename HashMap<T, U, V, W, X, Y>::iterator +HashMap<T, U, V, W, X, Y>::begin() { + return m_impl.begin(); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline typename HashMap<T, U, V, W, X, Y>::iterator +HashMap<T, U, V, W, X, Y>::end() { + return m_impl.end(); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline typename HashMap<T, U, V, W, X, Y>::const_iterator +HashMap<T, U, V, W, X, Y>::begin() const { + return m_impl.begin(); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline typename HashMap<T, U, V, W, X, Y>::const_iterator +HashMap<T, U, V, W, X, Y>::end() const { + return m_impl.end(); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline typename HashMap<T, U, V, W, X, Y>::iterator +HashMap<T, U, V, W, X, 
Y>::find(KeyPeekInType key) { + return m_impl.find(key); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline typename HashMap<T, U, V, W, X, Y>::const_iterator +HashMap<T, U, V, W, X, Y>::find(KeyPeekInType key) const { + return m_impl.find(key); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline bool HashMap<T, U, V, W, X, Y>::contains(KeyPeekInType key) const { + return m_impl.contains(key); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +template <typename HashTranslator, typename TYPE> +inline typename HashMap<T, U, V, W, X, Y>::iterator +HashMap<T, U, V, W, X, Y>::find(const TYPE& value) { + return m_impl + .template find<HashMapTranslatorAdapter<ValueTraits, HashTranslator>>( + value); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +template <typename HashTranslator, typename TYPE> +inline typename HashMap<T, U, V, W, X, Y>::const_iterator +HashMap<T, U, V, W, X, Y>::find(const TYPE& value) const { + return m_impl + .template find<HashMapTranslatorAdapter<ValueTraits, HashTranslator>>( + value); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +template <typename HashTranslator, typename TYPE> +inline bool HashMap<T, U, V, W, X, Y>::contains(const TYPE& value) const { + return m_impl + .template contains<HashMapTranslatorAdapter<ValueTraits, HashTranslator>>( + value); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +template <typename IncomingKeyType, typename IncomingMappedType> +typename HashMap<T, U, V, W, X, Y>::AddResult +HashMap<T, U, V, W, X, Y>::inlineAdd(IncomingKeyType&& key, + IncomingMappedType&& mapped) { + return m_impl.template add<HashMapTranslator<ValueTraits, HashFunctions>>( + std::forward<IncomingKeyType>(key), + 
std::forward<IncomingMappedType>(mapped)); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +template <typename IncomingKeyType, typename IncomingMappedType> +typename HashMap<T, U, V, W, X, Y>::AddResult HashMap<T, U, V, W, X, Y>::set( + IncomingKeyType&& key, + IncomingMappedType&& mapped) { + AddResult result = inlineAdd(std::forward<IncomingKeyType>(key), + std::forward<IncomingMappedType>(mapped)); + if (!result.isNewEntry) { + // The inlineAdd call above found an existing hash table entry; we need + // to set the mapped value. + // + // It's safe to call std::forward again, because |mapped| isn't moved if + // there's an existing entry. + MappedTraits::store(std::forward<IncomingMappedType>(mapped), + result.storedValue->value); + } + return result; +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +template <typename HashTranslator, + typename IncomingKeyType, + typename IncomingMappedType> +auto HashMap<T, U, V, W, X, Y>::insert(IncomingKeyType&& key, + IncomingMappedType&& mapped) + -> AddResult { + return m_impl.template addPassingHashCode< + HashMapTranslatorAdapter<ValueTraits, HashTranslator>>( + std::forward<IncomingKeyType>(key), + std::forward<IncomingMappedType>(mapped)); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +template <typename IncomingKeyType, typename IncomingMappedType> +typename HashMap<T, U, V, W, X, Y>::AddResult HashMap<T, U, V, W, X, Y>::insert( + IncomingKeyType&& key, + IncomingMappedType&& mapped) { + return inlineAdd(std::forward<IncomingKeyType>(key), + std::forward<IncomingMappedType>(mapped)); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +typename HashMap<T, U, V, W, X, Y>::MappedPeekType +HashMap<T, U, V, W, X, Y>::at(KeyPeekInType key) const { + ValueType* entry = const_cast<HashTableType&>(m_impl).lookup(key); + if (!entry) 
+ return MappedTraits::peek(MappedTraits::emptyValue()); + return MappedTraits::peek(entry->value); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline void HashMap<T, U, V, W, X, Y>::erase(iterator it) { + m_impl.remove(it.m_impl); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline void HashMap<T, U, V, W, X, Y>::erase(KeyPeekInType key) { + erase(find(key)); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline void HashMap<T, U, V, W, X, Y>::clear() { + m_impl.clear(); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +auto HashMap<T, U, V, W, X, Y>::take(KeyPeekInType key) -> MappedType { + iterator it = find(key); + if (it == end()) + return MappedTraits::emptyValue(); + MappedType result = std::move(it->value); + erase(it); + return result; +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +inline bool HashMap<T, U, V, W, X, Y>::isValidKey(KeyPeekInType key) { + if (KeyTraits::isDeletedValue(key)) + return false; + + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (key == KeyTraits::emptyValue()) + return false; + } else { + if (isHashTraitsEmptyValue<KeyTraits>(key)) + return false; + } + + return true; +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y> +bool operator==(const HashMap<T, U, V, W, X, Y>& a, + const HashMap<T, U, V, W, X, Y>& b) { + if (a.size() != b.size()) + return false; + + typedef typename HashMap<T, U, V, W, X, Y>::const_iterator const_iterator; + + const_iterator aEnd = a.end(); + const_iterator bEnd = b.end(); + for (const_iterator it = a.begin(); it != aEnd; ++it) { + const_iterator bPos = b.find(it->key); + if (bPos == bEnd || it->value != bPos->value) + return false; + } + + return true; +} + +template <typename T, + typename 
U, + typename V, + typename W, + typename X, + typename Y> +inline bool operator!=(const HashMap<T, U, V, W, X, Y>& a, + const HashMap<T, U, V, W, X, Y>& b) { + return !(a == b); +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y, + typename Z> +inline void copyKeysToVector(const HashMap<T, U, V, W, X, Y>& collection, + Z& vector) { + typedef + typename HashMap<T, U, V, W, X, Y>::const_iterator::KeysIterator iterator; + + vector.resize(collection.size()); + + iterator it = collection.begin().keys(); + iterator end = collection.end().keys(); + for (unsigned i = 0; it != end; ++it, ++i) + vector[i] = *it; +} + +template <typename T, + typename U, + typename V, + typename W, + typename X, + typename Y, + typename Z> +inline void copyValuesToVector(const HashMap<T, U, V, W, X, Y>& collection, + Z& vector) { + typedef typename HashMap<T, U, V, W, X, Y>::const_iterator::ValuesIterator + iterator; + + vector.resize(collection.size()); + + iterator it = collection.begin().values(); + iterator end = collection.end().values(); + for (unsigned i = 0; it != end; ++it, ++i) + vector[i] = *it; +} + +} // namespace WTF + +using WTF::HashMap; + +#endif // WTF_HashMap_h
diff --git a/third_party/WebKit/Source/platform/wtf/HashSet.h b/third_party/WebKit/Source/platform/wtf/HashSet.h new file mode 100644 index 0000000..297a156 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/HashSet.h
@@ -0,0 +1,330 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008, 2011 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_HashSet_h +#define WTF_HashSet_h + +#include "platform/wtf/HashTable.h" +#include "platform/wtf/allocator/PartitionAllocator.h" +#include <initializer_list> + +namespace WTF { + +struct IdentityExtractor; + +// Note: empty or deleted values are not allowed, using them may lead to +// undefined behavior. For pointer valuess this means that null pointers are +// not allowed unless you supply custom traits. 
+template <typename ValueArg, + typename HashArg = typename DefaultHash<ValueArg>::Hash, + typename TraitsArg = HashTraits<ValueArg>, + typename Allocator = PartitionAllocator> +class HashSet { + USE_ALLOCATOR(HashSet, Allocator); + + private: + typedef HashArg HashFunctions; + typedef TraitsArg ValueTraits; + typedef typename ValueTraits::PeekInType ValuePeekInType; + + public: + typedef typename ValueTraits::TraitType ValueType; + using value_type = ValueType; + + private: + typedef HashTable<ValueType, + ValueType, + IdentityExtractor, + HashFunctions, + ValueTraits, + ValueTraits, + Allocator> + HashTableType; + + public: + typedef HashTableConstIteratorAdapter<HashTableType, ValueTraits> iterator; + typedef HashTableConstIteratorAdapter<HashTableType, ValueTraits> + const_iterator; + typedef typename HashTableType::AddResult AddResult; + + HashSet() { + static_assert(Allocator::isGarbageCollected || + !IsPointerToGarbageCollectedType<ValueArg>::value, + "Cannot put raw pointers to garbage-collected classes into " + "an off-heap HashSet. Use HeapHashSet<Member<T>> instead."); + } + HashSet(const HashSet&) = default; + HashSet& operator=(const HashSet&) = default; + HashSet(HashSet&&) = default; + HashSet& operator=(HashSet&&) = default; + + HashSet(std::initializer_list<ValueType> elements); + HashSet& operator=(std::initializer_list<ValueType> elements); + + void swap(HashSet& ref) { m_impl.swap(ref.m_impl); } + + unsigned size() const; + unsigned capacity() const; + bool isEmpty() const; + + void reserveCapacityForSize(unsigned size) { + m_impl.reserveCapacityForSize(size); + } + + iterator begin() const; + iterator end() const; + + iterator find(ValuePeekInType) const; + bool contains(ValuePeekInType) const; + + // An alternate version of find() that finds the object by hashing and + // comparing with some other type, to avoid the cost of type + // conversion. 
HashTranslator must have the following function members: + // static unsigned hash(const T&); + // static bool equal(const ValueType&, const T&); + template <typename HashTranslator, typename T> + iterator find(const T&) const; + template <typename HashTranslator, typename T> + bool contains(const T&) const; + + // The return value is a pair of an iterator to the new value's location, + // and a bool that is true if an new entry was added. + template <typename IncomingValueType> + AddResult insert(IncomingValueType&&); + + // An alternate version of add() that finds the object by hashing and + // comparing with some other type, to avoid the cost of type conversion if + // the object is already in the table. HashTranslator must have the + // following function members: + // static unsigned hash(const T&); + // static bool equal(const ValueType&, const T&); + // static translate(ValueType&, T&&, unsigned hashCode); + template <typename HashTranslator, typename T> + AddResult addWithTranslator(T&&); + + void erase(ValuePeekInType); + void erase(iterator); + void clear(); + template <typename Collection> + void removeAll(const Collection& toBeRemoved) { + WTF::removeAll(*this, toBeRemoved); + } + + ValueType take(iterator); + ValueType take(ValuePeekInType); + ValueType takeAny(); + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher visitor) { + m_impl.trace(visitor); + } + + private: + HashTableType m_impl; +}; + +struct IdentityExtractor { + STATIC_ONLY(IdentityExtractor); + template <typename T> + static const T& extract(const T& t) { + return t; + } +}; + +template <typename Translator> +struct HashSetTranslatorAdapter { + STATIC_ONLY(HashSetTranslatorAdapter); + template <typename T> + static unsigned hash(const T& key) { + return Translator::hash(key); + } + template <typename T, typename U> + static bool equal(const T& a, const U& b) { + return Translator::equal(a, b); + } + template <typename T, typename U, typename V> + static void 
translate(T& location, U&& key, const V&, unsigned hashCode) { + Translator::translate(location, std::forward<U>(key), hashCode); + } +}; + +template <typename Value, + typename HashFunctions, + typename Traits, + typename Allocator> +HashSet<Value, HashFunctions, Traits, Allocator>::HashSet( + std::initializer_list<ValueType> elements) { + if (elements.size()) + m_impl.reserveCapacityForSize(elements.size()); + for (const ValueType& element : elements) + insert(element); +} + +template <typename Value, + typename HashFunctions, + typename Traits, + typename Allocator> +auto HashSet<Value, HashFunctions, Traits, Allocator>::operator=( + std::initializer_list<ValueType> elements) -> HashSet& { + *this = HashSet(std::move(elements)); + return *this; +} + +template <typename T, typename U, typename V, typename W> +inline unsigned HashSet<T, U, V, W>::size() const { + return m_impl.size(); +} + +template <typename T, typename U, typename V, typename W> +inline unsigned HashSet<T, U, V, W>::capacity() const { + return m_impl.capacity(); +} + +template <typename T, typename U, typename V, typename W> +inline bool HashSet<T, U, V, W>::isEmpty() const { + return m_impl.isEmpty(); +} + +template <typename T, typename U, typename V, typename W> +inline typename HashSet<T, U, V, W>::iterator HashSet<T, U, V, W>::begin() + const { + return m_impl.begin(); +} + +template <typename T, typename U, typename V, typename W> +inline typename HashSet<T, U, V, W>::iterator HashSet<T, U, V, W>::end() const { + return m_impl.end(); +} + +template <typename T, typename U, typename V, typename W> +inline typename HashSet<T, U, V, W>::iterator HashSet<T, U, V, W>::find( + ValuePeekInType value) const { + return m_impl.find(value); +} + +template <typename Value, + typename HashFunctions, + typename Traits, + typename Allocator> +inline bool HashSet<Value, HashFunctions, Traits, Allocator>::contains( + ValuePeekInType value) const { + return m_impl.contains(value); +} + +template <typename 
Value, + typename HashFunctions, + typename Traits, + typename Allocator> +template <typename HashTranslator, typename T> +typename HashSet<Value, HashFunctions, Traits, Allocator>:: + iterator inline HashSet<Value, HashFunctions, Traits, Allocator>::find( + const T& value) const { + return m_impl.template find<HashSetTranslatorAdapter<HashTranslator>>(value); +} + +template <typename Value, + typename HashFunctions, + typename Traits, + typename Allocator> +template <typename HashTranslator, typename T> +inline bool HashSet<Value, HashFunctions, Traits, Allocator>::contains( + const T& value) const { + return m_impl.template contains<HashSetTranslatorAdapter<HashTranslator>>( + value); +} + +template <typename T, typename U, typename V, typename W> +template <typename IncomingValueType> +inline typename HashSet<T, U, V, W>::AddResult HashSet<T, U, V, W>::insert( + IncomingValueType&& value) { + return m_impl.add(std::forward<IncomingValueType>(value)); +} + +template <typename Value, + typename HashFunctions, + typename Traits, + typename Allocator> +template <typename HashTranslator, typename T> +inline typename HashSet<Value, HashFunctions, Traits, Allocator>::AddResult +HashSet<Value, HashFunctions, Traits, Allocator>::addWithTranslator(T&& value) { + // Forward only the first argument, because the second argument isn't actually + // used in HashSetTranslatorAdapter. 
+ return m_impl + .template addPassingHashCode<HashSetTranslatorAdapter<HashTranslator>>( + std::forward<T>(value), value); +} + +template <typename T, typename U, typename V, typename W> +inline void HashSet<T, U, V, W>::erase(iterator it) { + m_impl.remove(it.m_impl); +} + +template <typename T, typename U, typename V, typename W> +inline void HashSet<T, U, V, W>::erase(ValuePeekInType value) { + erase(find(value)); +} + +template <typename T, typename U, typename V, typename W> +inline void HashSet<T, U, V, W>::clear() { + m_impl.clear(); +} + +template <typename T, typename U, typename V, typename W> +inline auto HashSet<T, U, V, W>::take(iterator it) -> ValueType { + if (it == end()) + return ValueTraits::emptyValue(); + + ValueType result = std::move(const_cast<ValueType&>(*it)); + erase(it); + + return result; +} + +template <typename T, typename U, typename V, typename W> +inline auto HashSet<T, U, V, W>::take(ValuePeekInType value) -> ValueType { + return take(find(value)); +} + +template <typename T, typename U, typename V, typename W> +inline auto HashSet<T, U, V, W>::takeAny() -> ValueType { + return take(begin()); +} + +template <typename C, typename W> +inline void copyToVector(const C& collection, W& vector) { + typedef typename C::const_iterator iterator; + + { + // Disallow GC across resize allocation, see crbug.com/568173 + typename W::GCForbiddenScope scope; + vector.resize(collection.size()); + } + + iterator it = collection.begin(); + iterator end = collection.end(); + for (unsigned i = 0; it != end; ++it, ++i) + vector[i] = *it; +} + +} // namespace WTF + +using WTF::HashSet; + +#endif // WTF_HashSet_h
diff --git a/third_party/WebKit/Source/wtf/HashTable.cpp b/third_party/WebKit/Source/platform/wtf/HashTable.cpp similarity index 95% rename from third_party/WebKit/Source/wtf/HashTable.cpp rename to third_party/WebKit/Source/platform/wtf/HashTable.cpp index 0a8c370..11e479a 100644 --- a/third_party/WebKit/Source/wtf/HashTable.cpp +++ b/third_party/WebKit/Source/platform/wtf/HashTable.cpp
@@ -17,12 +17,12 @@ Boston, MA 02110-1301, USA. */ -#include "wtf/HashTable.h" +#include "platform/wtf/HashTable.h" #if DUMP_HASHTABLE_STATS -#include "wtf/DataLog.h" -#include "wtf/ThreadingPrimitives.h" +#include "platform/wtf/DataLog.h" +#include "platform/wtf/ThreadingPrimitives.h" namespace WTF {
diff --git a/third_party/WebKit/Source/platform/wtf/HashTable.h b/third_party/WebKit/Source/platform/wtf/HashTable.h new file mode 100644 index 0000000..041cdce --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/HashTable.h
@@ -0,0 +1,2280 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights + * reserved. + * Copyright (C) 2008 David Levin <levin@chromium.org> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library + * General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_HashTable_h +#define WTF_HashTable_h + +#include "platform/wtf/Alignment.h" +#include "platform/wtf/Allocator.h" +#include "platform/wtf/Assertions.h" +#include "platform/wtf/ConditionalDestructor.h" +#include "platform/wtf/HashTraits.h" +#include "platform/wtf/PtrUtil.h" +#include "platform/wtf/allocator/PartitionAllocator.h" +#include <memory> + +#define DUMP_HASHTABLE_STATS 0 +#define DUMP_HASHTABLE_STATS_PER_TABLE 0 + +#if DUMP_HASHTABLE_STATS +#include "platform/wtf/Atomics.h" +#include "platform/wtf/Threading.h" +#endif + +#if DUMP_HASHTABLE_STATS_PER_TABLE +#include "platform/wtf/DataLog.h" +#include <type_traits> +#endif + +#if DUMP_HASHTABLE_STATS +#if DUMP_HASHTABLE_STATS_PER_TABLE + +#define UPDATE_PROBE_COUNTS() \ + ++probeCount; \ + HashTableStats::instance().recordCollisionAtCount(probeCount); \ + ++perTableProbeCount; \ + m_stats->recordCollisionAtCount(perTableProbeCount) +#define UPDATE_ACCESS_COUNTS() \ + atomicIncrement(&HashTableStats::instance().numAccesses); \ + int probeCount = 0; 
\ + ++m_stats->numAccesses; \ + int perTableProbeCount = 0 +#else +#define UPDATE_PROBE_COUNTS() \ + ++probeCount; \ + HashTableStats::instance().recordCollisionAtCount(probeCount) +#define UPDATE_ACCESS_COUNTS() \ + atomicIncrement(&HashTableStats::instance().numAccesses); \ + int probeCount = 0 +#endif +#else +#if DUMP_HASHTABLE_STATS_PER_TABLE +#define UPDATE_PROBE_COUNTS() \ + ++perTableProbeCount; \ + m_stats->recordCollisionAtCount(perTableProbeCount) +#define UPDATE_ACCESS_COUNTS() \ + ++m_stats->numAccesses; \ + int perTableProbeCount = 0 +#else +#define UPDATE_PROBE_COUNTS() \ + do { \ + } while (0) +#define UPDATE_ACCESS_COUNTS() \ + do { \ + } while (0) +#endif +#endif + +namespace WTF { + +// This is for tracing inside collections that have special support for weak +// pointers. The trait has a trace method which returns true if there are weak +// pointers to things that have not (yet) been marked live. Returning true +// indicates that the entry in the collection may yet be removed by weak +// handling. Default implementation for non-weak types is to use the regular +// non-weak TraceTrait. Default implementation for types with weakness is to +// call traceInCollection on the type's trait. +template <WeakHandlingFlag weakHandlingFlag, + ShouldWeakPointersBeMarkedStrongly strongify, + typename T, + typename Traits> +struct TraceInCollectionTrait; + +#if DUMP_HASHTABLE_STATS +struct WTF_EXPORT HashTableStats { + HashTableStats() + : numAccesses(0), + numRehashes(0), + numRemoves(0), + numReinserts(0), + maxCollisions(0), + numCollisions(0), + collisionGraph() {} + + // The following variables are all atomically incremented when modified. + int numAccesses; + int numRehashes; + int numRemoves; + int numReinserts; + + // The following variables are only modified in the recordCollisionAtCount + // method within a mutex. 
+ int maxCollisions; + int numCollisions; + int collisionGraph[4096]; + + void copy(const HashTableStats* other); + void recordCollisionAtCount(int count); + void dumpStats(); + + static HashTableStats& instance(); + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher) {} +}; + +#if DUMP_HASHTABLE_STATS_PER_TABLE +template <typename Allocator, bool isGCType = Allocator::isGarbageCollected> +class HashTableStatsPtr; + +template <typename Allocator> +class HashTableStatsPtr<Allocator, false> final { + STATIC_ONLY(HashTableStatsPtr); + + public: + static std::unique_ptr<HashTableStats> create() { + return WTF::wrapUnique(new HashTableStats); + } + + static std::unique_ptr<HashTableStats> copy( + const std::unique_ptr<HashTableStats>& other) { + if (!other) + return nullptr; + return WTF::wrapUnique(new HashTableStats(*other)); + } + + static void swap(std::unique_ptr<HashTableStats>& stats, + std::unique_ptr<HashTableStats>& other) { + stats.swap(other); + } +}; + +template <typename Allocator> +class HashTableStatsPtr<Allocator, true> final { + STATIC_ONLY(HashTableStatsPtr); + + public: + static HashTableStats* create() { + // Resort to manually allocating this POD on the vector + // backing heap, as blink::GarbageCollected<> isn't in scope + // in WTF. 
+ void* storage = reinterpret_cast<void*>( + Allocator::template allocateVectorBacking<unsigned char>( + sizeof(HashTableStats))); + return new (storage) HashTableStats; + } + + static HashTableStats* copy(const HashTableStats* other) { + if (!other) + return nullptr; + HashTableStats* obj = create(); + obj->copy(other); + return obj; + } + + static void swap(HashTableStats*& stats, HashTableStats*& other) { + std::swap(stats, other); + } +}; +#endif +#endif + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +class HashTable; +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +class HashTableIterator; +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +class HashTableConstIterator; +template <typename Value, + typename HashFunctions, + typename HashTraits, + typename Allocator> +class LinkedHashSet; +template <WeakHandlingFlag x, + typename T, + typename U, + typename V, + typename W, + typename X, + typename Y, + typename Z> +struct WeakProcessingHashTableHelper; + +typedef enum { HashItemKnownGood } HashItemKnownGoodTag; + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +class HashTableConstIterator final { + DISALLOW_NEW(); + + private: + typedef HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> + HashTableType; + typedef HashTableIterator<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> + iterator; + typedef HashTableConstIterator<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> + const_iterator; + typedef Value ValueType; + using value_type = 
ValueType; + typedef typename Traits::IteratorConstGetType GetType; + typedef const ValueType* PointerType; + + friend class HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>; + friend class HashTableIterator<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>; + + void skipEmptyBuckets() { + while (m_position != m_endPosition && + HashTableType::isEmptyOrDeletedBucket(*m_position)) + ++m_position; + } + + HashTableConstIterator(PointerType position, + PointerType endPosition, + const HashTableType* container) + : m_position(position), + m_endPosition(endPosition) +#if DCHECK_IS_ON() + , + m_container(container), + m_containerModifications(container->modifications()) +#endif + { + skipEmptyBuckets(); + } + + HashTableConstIterator(PointerType position, + PointerType endPosition, + const HashTableType* container, + HashItemKnownGoodTag) + : m_position(position), + m_endPosition(endPosition) +#if DCHECK_IS_ON() + , + m_container(container), + m_containerModifications(container->modifications()) +#endif + { +#if DCHECK_IS_ON() + DCHECK_EQ(m_containerModifications, m_container->modifications()); +#endif + } + + void checkModifications() const { +#if DCHECK_IS_ON() + // HashTable and collections that build on it do not support + // modifications while there is an iterator in use. The exception is + // ListHashSet, which has its own iterators that tolerate modification + // of the underlying set. 
+ DCHECK_EQ(m_containerModifications, m_container->modifications()); + DCHECK(!m_container->accessForbidden()); +#endif + } + + public: + HashTableConstIterator() {} + + GetType get() const { + checkModifications(); + return m_position; + } + typename Traits::IteratorConstReferenceType operator*() const { + return Traits::getToReferenceConstConversion(get()); + } + GetType operator->() const { return get(); } + + const_iterator& operator++() { + DCHECK_NE(m_position, m_endPosition); + checkModifications(); + ++m_position; + skipEmptyBuckets(); + return *this; + } + + // postfix ++ intentionally omitted + + // Comparison. + bool operator==(const const_iterator& other) const { + return m_position == other.m_position; + } + bool operator!=(const const_iterator& other) const { + return m_position != other.m_position; + } + bool operator==(const iterator& other) const { + return *this == static_cast<const_iterator>(other); + } + bool operator!=(const iterator& other) const { + return *this != static_cast<const_iterator>(other); + } + + std::ostream& printTo(std::ostream& stream) const { + if (m_position == m_endPosition) + return stream << "iterator representing <end>"; + // TODO(tkent): Change |m_position| to |*m_position| to show the + // pointed object. It requires a lot of new stream printer functions. 
+ return stream << "iterator pointing to " << m_position; + } + + private: + PointerType m_position; + PointerType m_endPosition; +#if DCHECK_IS_ON() + const HashTableType* m_container; + int64_t m_containerModifications; +#endif +}; + +template <typename Key, + typename Value, + typename Extractor, + typename Hash, + typename Traits, + typename KeyTraits, + typename Allocator> +std::ostream& operator<<(std::ostream& stream, + const HashTableConstIterator<Key, + Value, + Extractor, + Hash, + Traits, + KeyTraits, + Allocator>& iterator) { + return iterator.printTo(stream); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +class HashTableIterator final { + DISALLOW_NEW(); + + private: + typedef HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> + HashTableType; + typedef HashTableIterator<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> + iterator; + typedef HashTableConstIterator<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> + const_iterator; + typedef Value ValueType; + typedef typename Traits::IteratorGetType GetType; + typedef ValueType* PointerType; + + friend class HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>; + + HashTableIterator(PointerType pos, + PointerType end, + const HashTableType* container) + : m_iterator(pos, end, container) {} + HashTableIterator(PointerType pos, + PointerType end, + const HashTableType* container, + HashItemKnownGoodTag tag) + : m_iterator(pos, end, container, tag) {} + + public: + HashTableIterator() {} + + // default copy, assignment and destructor are OK + + GetType get() const { return const_cast<GetType>(m_iterator.get()); } + typename Traits::IteratorReferenceType operator*() const { + return Traits::getToReferenceConversion(get()); + } + GetType operator->() const { 
return get(); } + + iterator& operator++() { + ++m_iterator; + return *this; + } + + // postfix ++ intentionally omitted + + // Comparison. + bool operator==(const iterator& other) const { + return m_iterator == other.m_iterator; + } + bool operator!=(const iterator& other) const { + return m_iterator != other.m_iterator; + } + bool operator==(const const_iterator& other) const { + return m_iterator == other; + } + bool operator!=(const const_iterator& other) const { + return m_iterator != other; + } + + operator const_iterator() const { return m_iterator; } + std::ostream& printTo(std::ostream& stream) const { + return m_iterator.printTo(stream); + } + + private: + const_iterator m_iterator; +}; + +template <typename Key, + typename Value, + typename Extractor, + typename Hash, + typename Traits, + typename KeyTraits, + typename Allocator> +std::ostream& operator<<(std::ostream& stream, + const HashTableIterator<Key, + Value, + Extractor, + Hash, + Traits, + KeyTraits, + Allocator>& iterator) { + return iterator.printTo(stream); +} + +using std::swap; + +template <typename T, typename Allocator, bool enterGCForbiddenScope> +struct Mover { + STATIC_ONLY(Mover); + static void move(T&& from, T& to) { + to.~T(); + new (NotNull, &to) T(std::move(from)); + } +}; + +template <typename T, typename Allocator> +struct Mover<T, Allocator, true> { + STATIC_ONLY(Mover); + static void move(T&& from, T& to) { + to.~T(); + Allocator::enterGCForbiddenScope(); + new (NotNull, &to) T(std::move(from)); + Allocator::leaveGCForbiddenScope(); + } +}; + +template <typename HashFunctions> +class IdentityHashTranslator { + STATIC_ONLY(IdentityHashTranslator); + + public: + template <typename T> + static unsigned hash(const T& key) { + return HashFunctions::hash(key); + } + template <typename T, typename U> + static bool equal(const T& a, const U& b) { + return HashFunctions::equal(a, b); + } + template <typename T, typename U, typename V> + static void translate(T& location, U&&, V&& 
value) { + location = std::forward<V>(value); + } +}; + +template <typename HashTableType, typename ValueType> +struct HashTableAddResult final { + STACK_ALLOCATED(); + HashTableAddResult(const HashTableType* container, + ValueType* storedValue, + bool isNewEntry) + : storedValue(storedValue), + isNewEntry(isNewEntry) +#if ENABLE(SECURITY_ASSERT) + , + m_container(container), + m_containerModifications(container->modifications()) +#endif + { + ALLOW_UNUSED_LOCAL(container); + DCHECK(container); + } + + ValueType* storedValue; + bool isNewEntry; + +#if ENABLE(SECURITY_ASSERT) + ~HashTableAddResult() { + // If rehash happened before accessing storedValue, it's + // use-after-free. Any modification may cause a rehash, so we check for + // modifications here. + + // Rehash after accessing storedValue is harmless but will assert if the + // AddResult destructor takes place after a modification. You may need + // to limit the scope of the AddResult. + SECURITY_DCHECK(m_containerModifications == m_container->modifications()); + } + + private: + const HashTableType* m_container; + const int64_t m_containerModifications; +#endif +}; + +template <typename Value, typename Extractor, typename KeyTraits> +struct HashTableHelper { + STATIC_ONLY(HashTableHelper); + static bool isEmptyBucket(const Value& value) { + return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); + } + static bool isDeletedBucket(const Value& value) { + return KeyTraits::isDeletedValue(Extractor::extract(value)); + } + static bool isEmptyOrDeletedBucket(const Value& value) { + return isEmptyBucket(value) || isDeletedBucket(value); + } +}; + +template <typename HashTranslator, + typename KeyTraits, + bool safeToCompareToEmptyOrDeleted> +struct HashTableKeyChecker { + STATIC_ONLY(HashTableKeyChecker); + // There's no simple generic way to make this check if + // safeToCompareToEmptyOrDeleted is false, so the check always passes. 
+ template <typename T> + static bool checkKey(const T&) { + return true; + } +}; + +template <typename HashTranslator, typename KeyTraits> +struct HashTableKeyChecker<HashTranslator, KeyTraits, true> { + STATIC_ONLY(HashTableKeyChecker); + template <typename T> + static bool checkKey(const T& key) { + // FIXME : Check also equality to the deleted value. + return !HashTranslator::equal(KeyTraits::emptyValue(), key); + } +}; + +// Note: empty or deleted key values are not allowed, using them may lead to +// undefined behavior. For pointer keys this means that null pointers are not +// allowed unless you supply custom key traits. +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +class HashTable final + : public ConditionalDestructor<HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>, + Allocator::isGarbageCollected> { + DISALLOW_NEW(); + + public: + typedef HashTableIterator<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> + iterator; + typedef HashTableConstIterator<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> + const_iterator; + typedef Traits ValueTraits; + typedef Key KeyType; + typedef typename KeyTraits::PeekInType KeyPeekInType; + typedef Value ValueType; + typedef Extractor ExtractorType; + typedef KeyTraits KeyTraitsType; + typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType; + typedef HashTableAddResult<HashTable, ValueType> AddResult; + + HashTable(); + void finalize() { + DCHECK(!Allocator::isGarbageCollected); + if (LIKELY(!m_table)) + return; + enterAccessForbiddenScope(); + deleteAllBucketsAndDeallocate(m_table, m_tableSize); + leaveAccessForbiddenScope(); + m_table = nullptr; + } + + HashTable(const HashTable&); + HashTable(HashTable&&); + void swap(HashTable&); + HashTable& operator=(const HashTable&); + HashTable& 
operator=(HashTable&&); + + // When the hash table is empty, just return the same iterator for end as + // for begin. This is more efficient because we don't have to skip all the + // empty and deleted buckets, and iterating an empty table is a common case + // that's worth optimizing. + iterator begin() { return isEmpty() ? end() : makeIterator(m_table); } + iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); } + const_iterator begin() const { + return isEmpty() ? end() : makeConstIterator(m_table); + } + const_iterator end() const { + return makeKnownGoodConstIterator(m_table + m_tableSize); + } + + unsigned size() const { + DCHECK(!accessForbidden()); + return m_keyCount; + } + unsigned capacity() const { + DCHECK(!accessForbidden()); + return m_tableSize; + } + bool isEmpty() const { + DCHECK(!accessForbidden()); + return !m_keyCount; + } + + void reserveCapacityForSize(unsigned size); + + template <typename IncomingValueType> + AddResult add(IncomingValueType&& value) { + return add<IdentityTranslatorType>(Extractor::extract(value), + std::forward<IncomingValueType>(value)); + } + + // A special version of add() that finds the object by hashing and comparing + // with some other type, to avoid the cost of type conversion if the object + // is already in the table. 
+ template <typename HashTranslator, typename T, typename Extra> + AddResult add(T&& key, Extra&&); + template <typename HashTranslator, typename T, typename Extra> + AddResult addPassingHashCode(T&& key, Extra&&); + + iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); } + const_iterator find(KeyPeekInType key) const { + return find<IdentityTranslatorType>(key); + } + bool contains(KeyPeekInType key) const { + return contains<IdentityTranslatorType>(key); + } + + template <typename HashTranslator, typename T> + iterator find(const T&); + template <typename HashTranslator, typename T> + const_iterator find(const T&) const; + template <typename HashTranslator, typename T> + bool contains(const T&) const; + + void remove(KeyPeekInType); + void remove(iterator); + void remove(const_iterator); + void clear(); + + static bool isEmptyBucket(const ValueType& value) { + return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); + } + static bool isDeletedBucket(const ValueType& value) { + return KeyTraits::isDeletedValue(Extractor::extract(value)); + } + static bool isEmptyOrDeletedBucket(const ValueType& value) { + return HashTableHelper<ValueType, Extractor, + KeyTraits>::isEmptyOrDeletedBucket(value); + } + + ValueType* lookup(KeyPeekInType key) { + return lookup<IdentityTranslatorType, KeyPeekInType>(key); + } + template <typename HashTranslator, typename T> + ValueType* lookup(const T&); + template <typename HashTranslator, typename T> + const ValueType* lookup(const T&) const; + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher); + +#if DCHECK_IS_ON() + void enterAccessForbiddenScope() { + DCHECK(!m_accessForbidden); + m_accessForbidden = true; + } + void leaveAccessForbiddenScope() { m_accessForbidden = false; } + bool accessForbidden() const { return m_accessForbidden; } + int64_t modifications() const { return m_modifications; } + void registerModification() { m_modifications++; } + // HashTable and 
collections that build on it do not support modifications + // while there is an iterator in use. The exception is ListHashSet, which + // has its own iterators that tolerate modification of the underlying set. + void checkModifications(int64_t mods) const { + DCHECK_EQ(mods, m_modifications); + } +#else + ALWAYS_INLINE void enterAccessForbiddenScope() {} + ALWAYS_INLINE void leaveAccessForbiddenScope() {} + ALWAYS_INLINE bool accessForbidden() const { return false; } + ALWAYS_INLINE int64_t modifications() const { return 0; } + ALWAYS_INLINE void registerModification() {} + ALWAYS_INLINE void checkModifications(int64_t mods) const {} +#endif + + private: + static ValueType* allocateTable(unsigned size); + static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size); + + typedef std::pair<ValueType*, bool> LookupType; + typedef std::pair<LookupType, unsigned> FullLookupType; + + LookupType lookupForWriting(const Key& key) { + return lookupForWriting<IdentityTranslatorType>(key); + } + template <typename HashTranslator, typename T> + FullLookupType fullLookupForWriting(const T&); + template <typename HashTranslator, typename T> + LookupType lookupForWriting(const T&); + + void remove(ValueType*); + + bool shouldExpand() const { + return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; + } + bool mustRehashInPlace() const { + return m_keyCount * m_minLoad < m_tableSize * 2; + } + bool shouldShrink() const { + // isAllocationAllowed check should be at the last because it's + // expensive. 
+ return m_keyCount * m_minLoad < m_tableSize && + m_tableSize > KeyTraits::minimumTableSize && + Allocator::isAllocationAllowed(); + } + ValueType* expand(ValueType* entry = 0); + void shrink() { rehash(m_tableSize / 2, 0); } + + ValueType* expandBuffer(unsigned newTableSize, ValueType* entry, bool&); + ValueType* rehashTo(ValueType* newTable, + unsigned newTableSize, + ValueType* entry); + ValueType* rehash(unsigned newTableSize, ValueType* entry); + ValueType* reinsert(ValueType&&); + + static void initializeBucket(ValueType& bucket); + static void deleteBucket(ValueType& bucket) { + bucket.~ValueType(); + Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected); + } + + FullLookupType makeLookupResult(ValueType* position, + bool found, + unsigned hash) { + return FullLookupType(LookupType(position, found), hash); + } + + iterator makeIterator(ValueType* pos) { + return iterator(pos, m_table + m_tableSize, this); + } + const_iterator makeConstIterator(ValueType* pos) const { + return const_iterator(pos, m_table + m_tableSize, this); + } + iterator makeKnownGoodIterator(ValueType* pos) { + return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); + } + const_iterator makeKnownGoodConstIterator(ValueType* pos) const { + return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); + } + + static const unsigned m_maxLoad = 2; + static const unsigned m_minLoad = 6; + + unsigned tableSizeMask() const { + size_t mask = m_tableSize - 1; + DCHECK_EQ((mask & m_tableSize), 0u); + return mask; + } + + void setEnqueued() { m_queueFlag = true; } + void clearEnqueued() { m_queueFlag = false; } + bool enqueued() { return m_queueFlag; } + + ValueType* m_table; + unsigned m_tableSize; + unsigned m_keyCount; +#if DCHECK_IS_ON() + unsigned m_deletedCount : 30; + unsigned m_queueFlag : 1; + unsigned m_accessForbidden : 1; + unsigned m_modifications; +#else + unsigned m_deletedCount : 31; + unsigned m_queueFlag : 1; +#endif + +#if 
DUMP_HASHTABLE_STATS_PER_TABLE + public: + mutable + typename std::conditional<Allocator::isGarbageCollected, + HashTableStats*, + std::unique_ptr<HashTableStats>>::type m_stats; +#endif + + template <WeakHandlingFlag x, + typename T, + typename U, + typename V, + typename W, + typename X, + typename Y, + typename Z> + friend struct WeakProcessingHashTableHelper; + template <typename T, typename U, typename V, typename W> + friend class LinkedHashSet; +}; + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +inline HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::HashTable() + : m_table(nullptr), + m_tableSize(0), + m_keyCount(0), + m_deletedCount(0), + m_queueFlag(false) +#if DCHECK_IS_ON() + , + m_accessForbidden(false), + m_modifications(0) +#endif +#if DUMP_HASHTABLE_STATS_PER_TABLE + , + m_stats(nullptr) +#endif +{ + static_assert(Allocator::isGarbageCollected || + (!IsPointerToGarbageCollectedType<Key>::value && + !IsPointerToGarbageCollectedType<Value>::value), + "Cannot put raw pointers to garbage-collected classes into an " + "off-heap collection."); +} + +inline unsigned doubleHash(unsigned key) { + key = ~key + (key >> 23); + key ^= (key << 12); + key ^= (key >> 7); + key ^= (key << 2); + key ^= (key >> 20); + return key; +} + +inline unsigned calculateCapacity(unsigned size) { + for (unsigned mask = size; mask; mask >>= 1) + size |= mask; // 00110101010 -> 00111111111 + return (size + 1) * 2; // 00111111111 -> 10000000000 +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +void HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::reserveCapacityForSize(unsigned newSize) { + unsigned newCapacity = calculateCapacity(newSize); + if (newCapacity < 
KeyTraits::minimumTableSize) + newCapacity = KeyTraits::minimumTableSize; + + if (newCapacity > capacity()) { + RELEASE_ASSERT(!static_cast<int>( + newCapacity >> + 31)); // HashTable capacity should not overflow 32bit int. + rehash(newCapacity, 0); + } +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T> +inline Value* +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + lookup(const T& key) { + return const_cast<Value*>( + const_cast<const HashTable*>(this)->lookup<HashTranslator>(key)); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T> +inline const Value* +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + lookup(const T& key) const { + DCHECK(!accessForbidden()); + DCHECK((HashTableKeyChecker< + HashTranslator, KeyTraits, + HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key))); + const ValueType* table = m_table; + if (!table) + return nullptr; + + size_t k = 0; + size_t sizeMask = tableSizeMask(); + unsigned h = HashTranslator::hash(key); + size_t i = h & sizeMask; + + UPDATE_ACCESS_COUNTS(); + + while (1) { + const ValueType* entry = table + i; + + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (HashTranslator::equal(Extractor::extract(*entry), key)) + return entry; + + if (isEmptyBucket(*entry)) + return nullptr; + } else { + if (isEmptyBucket(*entry)) + return nullptr; + + if (!isDeletedBucket(*entry) && + HashTranslator::equal(Extractor::extract(*entry), key)) + return entry; + } + UPDATE_PROBE_COUNTS(); + if (!k) + k = 1 | doubleHash(h); + i = (i + k) & sizeMask; + } +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + 
typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T> +inline typename HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::LookupType +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + lookupForWriting(const T& key) { + DCHECK(!accessForbidden()); + DCHECK(m_table); + registerModification(); + + ValueType* table = m_table; + size_t k = 0; + size_t sizeMask = tableSizeMask(); + unsigned h = HashTranslator::hash(key); + size_t i = h & sizeMask; + + UPDATE_ACCESS_COUNTS(); + + ValueType* deletedEntry = nullptr; + + while (1) { + ValueType* entry = table + i; + + if (isEmptyBucket(*entry)) + return LookupType(deletedEntry ? deletedEntry : entry, false); + + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (HashTranslator::equal(Extractor::extract(*entry), key)) + return LookupType(entry, true); + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + } else { + if (isDeletedBucket(*entry)) + deletedEntry = entry; + else if (HashTranslator::equal(Extractor::extract(*entry), key)) + return LookupType(entry, true); + } + UPDATE_PROBE_COUNTS(); + if (!k) + k = 1 | doubleHash(h); + i = (i + k) & sizeMask; + } +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T> +inline typename HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::FullLookupType +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + fullLookupForWriting(const T& key) { + DCHECK(!accessForbidden()); + DCHECK(m_table); + registerModification(); + + ValueType* table = m_table; + size_t k = 0; + size_t sizeMask = tableSizeMask(); + unsigned h = HashTranslator::hash(key); + size_t i = h & sizeMask; + + UPDATE_ACCESS_COUNTS(); + + ValueType* deletedEntry = nullptr; 
+ + while (1) { + ValueType* entry = table + i; + + if (isEmptyBucket(*entry)) + return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h); + + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (HashTranslator::equal(Extractor::extract(*entry), key)) + return makeLookupResult(entry, true, h); + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + } else { + if (isDeletedBucket(*entry)) + deletedEntry = entry; + else if (HashTranslator::equal(Extractor::extract(*entry), key)) + return makeLookupResult(entry, true, h); + } + UPDATE_PROBE_COUNTS(); + if (!k) + k = 1 | doubleHash(h); + i = (i + k) & sizeMask; + } +} + +template <bool emptyValueIsZero> +struct HashTableBucketInitializer; + +template <> +struct HashTableBucketInitializer<false> { + STATIC_ONLY(HashTableBucketInitializer); + template <typename Traits, typename Value> + static void initialize(Value& bucket) { + new (NotNull, &bucket) Value(Traits::emptyValue()); + } +}; + +template <> +struct HashTableBucketInitializer<true> { + STATIC_ONLY(HashTableBucketInitializer); + template <typename Traits, typename Value> + static void initialize(Value& bucket) { + // This initializes the bucket without copying the empty value. That + // makes it possible to use this with types that don't support copying. + // The memset to 0 looks like a slow operation but is optimized by the + // compilers. 
+ memset(&bucket, 0, sizeof(bucket)); + } +}; + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +inline void +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + initializeBucket(ValueType& bucket) { + HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize< + Traits>(bucket); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T, typename Extra> +typename HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::AddResult +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + add(T&& key, Extra&& extra) { + DCHECK(!accessForbidden()); + DCHECK(Allocator::isAllocationAllowed()); + if (!m_table) + expand(); + + DCHECK(m_table); + + ValueType* table = m_table; + size_t k = 0; + size_t sizeMask = tableSizeMask(); + unsigned h = HashTranslator::hash(key); + size_t i = h & sizeMask; + + UPDATE_ACCESS_COUNTS(); + + ValueType* deletedEntry = nullptr; + ValueType* entry; + while (1) { + entry = table + i; + + if (isEmptyBucket(*entry)) + break; + + if (HashFunctions::safeToCompareToEmptyOrDeleted) { + if (HashTranslator::equal(Extractor::extract(*entry), key)) + return AddResult(this, entry, false); + + if (isDeletedBucket(*entry)) + deletedEntry = entry; + } else { + if (isDeletedBucket(*entry)) + deletedEntry = entry; + else if (HashTranslator::equal(Extractor::extract(*entry), key)) + return AddResult(this, entry, false); + } + UPDATE_PROBE_COUNTS(); + if (!k) + k = 1 | doubleHash(h); + i = (i + k) & sizeMask; + } + + registerModification(); + + if (deletedEntry) { + // Overwrite any data left over from last use, using placement new or + // memset. 
+ initializeBucket(*deletedEntry); + entry = deletedEntry; + --m_deletedCount; + } + + HashTranslator::translate(*entry, std::forward<T>(key), + std::forward<Extra>(extra)); + DCHECK(!isEmptyOrDeletedBucket(*entry)); + + ++m_keyCount; + + if (shouldExpand()) { + entry = expand(entry); + } else if (Traits::weakHandlingFlag == WeakHandlingInCollections && + shouldShrink()) { + // When weak hash tables are processed by the garbage collector, + // elements with no other strong references to them will have their + // table entries cleared. But no shrinking of the backing store is + // allowed at that time, as allocations are prohibited during that + // GC phase. + // + // With that weak processing taking care of removals, explicit + // remove()s of elements is rarely done. Which implies that the + // weak hash table will never be checked if it can be shrunk. + // + // To prevent weak hash tables with very low load factors from + // developing, we perform it when adding elements instead. + entry = rehash(m_tableSize / 2, entry); + } + + return AddResult(this, entry, true); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T, typename Extra> +typename HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::AddResult +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + addPassingHashCode(T&& key, Extra&& extra) { + DCHECK(!accessForbidden()); + DCHECK(Allocator::isAllocationAllowed()); + if (!m_table) + expand(); + + FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key); + + ValueType* entry = lookupResult.first.first; + bool found = lookupResult.first.second; + unsigned h = lookupResult.second; + + if (found) + return AddResult(this, entry, false); + + registerModification(); + + if (isDeletedBucket(*entry)) { + initializeBucket(*entry); + 
--m_deletedCount; + } + + HashTranslator::translate(*entry, std::forward<T>(key), + std::forward<Extra>(extra), h); + DCHECK(!isEmptyOrDeletedBucket(*entry)); + + ++m_keyCount; + if (shouldExpand()) + entry = expand(entry); + + return AddResult(this, entry, true); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +Value* +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + reinsert(ValueType&& entry) { + DCHECK(m_table); + registerModification(); + DCHECK(!lookupForWriting(Extractor::extract(entry)).second); + DCHECK( + !isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first))); +#if DUMP_HASHTABLE_STATS + atomicIncrement(&HashTableStats::instance().numReinserts); +#endif +#if DUMP_HASHTABLE_STATS_PER_TABLE + ++m_stats->numReinserts; +#endif + Value* newEntry = lookupForWriting(Extractor::extract(entry)).first; + Mover<ValueType, Allocator, + Traits::template NeedsToForbidGCOnMove<>::value>::move(std::move(entry), + *newEntry); + + return newEntry; +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T> +inline typename HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::iterator +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + find(const T& key) { + ValueType* entry = lookup<HashTranslator>(key); + if (!entry) + return end(); + + return makeKnownGoodIterator(entry); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T> +inline typename HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::const_iterator 
+HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + find(const T& key) const { + ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key); + if (!entry) + return end(); + + return makeKnownGoodConstIterator(entry); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename HashTranslator, typename T> +bool HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::contains(const T& key) const { + return const_cast<HashTable*>(this)->lookup<HashTranslator>(key); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +void HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::remove(ValueType* pos) { + registerModification(); +#if DUMP_HASHTABLE_STATS + atomicIncrement(&HashTableStats::instance().numRemoves); +#endif +#if DUMP_HASHTABLE_STATS_PER_TABLE + ++m_stats->numRemoves; +#endif + + enterAccessForbiddenScope(); + deleteBucket(*pos); + leaveAccessForbiddenScope(); + ++m_deletedCount; + --m_keyCount; + + if (shouldShrink()) + shrink(); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +inline void +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + remove(iterator it) { + if (it == end()) + return; + remove(const_cast<ValueType*>(it.m_iterator.m_position)); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +inline void +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + remove(const_iterator it) { + if (it == end()) + return; + 
remove(const_cast<ValueType*>(it.m_position)); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +inline void +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + remove(KeyPeekInType key) { + remove(find(key)); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +Value* +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + allocateTable(unsigned size) { + size_t allocSize = size * sizeof(ValueType); + ValueType* result; + // Assert that we will not use memset on things with a vtable entry. The + // compiler will also check this on some platforms. We would like to check + // this on the whole value (key-value pair), but std::is_polymorphic will + // return false for a pair of two types, even if one of the components is + // polymorphic. 
+ static_assert( + !Traits::emptyValueIsZero || !std::is_polymorphic<KeyType>::value, + "empty value cannot be zero for things with a vtable"); + static_assert(Allocator::isGarbageCollected || + ((!AllowsOnlyPlacementNew<KeyType>::value || + !IsTraceable<KeyType>::value) && + (!AllowsOnlyPlacementNew<ValueType>::value || + !IsTraceable<ValueType>::value)), + "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that " + "have trace methods into an off-heap HashTable"); + + if (Traits::emptyValueIsZero) { + result = Allocator::template allocateZeroedHashTableBacking<ValueType, + HashTable>( + allocSize); + } else { + result = Allocator::template allocateHashTableBacking<ValueType, HashTable>( + allocSize); + for (unsigned i = 0; i < size; i++) + initializeBucket(result[i]); + } + return result; +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +void HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, + unsigned size) { + if (!IsTriviallyDestructible<ValueType>::value) { + for (unsigned i = 0; i < size; ++i) { + // This code is called when the hash table is cleared or resized. We + // have allocated a new backing store and we need to run the + // destructors on the old backing store, as it is being freed. If we + // are GCing we need to both call the destructor and mark the bucket + // as deleted, otherwise the destructor gets called again when the + // GC finds the backing store. With the default allocator it's + // enough to call the destructor, since we will free the memory + // explicitly and we won't see the memory with the bucket again. 
+ if (Allocator::isGarbageCollected) { + if (!isEmptyOrDeletedBucket(table[i])) + deleteBucket(table[i]); + } else { + if (!isDeletedBucket(table[i])) + table[i].~ValueType(); + } + } + } + Allocator::freeHashTableBacking(table); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +Value* +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + expand(Value* entry) { + unsigned newSize; + if (!m_tableSize) { + newSize = KeyTraits::minimumTableSize; + } else if (mustRehashInPlace()) { + newSize = m_tableSize; + } else { + newSize = m_tableSize * 2; + RELEASE_ASSERT(newSize > m_tableSize); + } + + return rehash(newSize, entry); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +Value* +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + expandBuffer(unsigned newTableSize, Value* entry, bool& success) { + success = false; + DCHECK_LT(m_tableSize, newTableSize); + if (!Allocator::expandHashTableBacking(m_table, + newTableSize * sizeof(ValueType))) + return nullptr; + + success = true; + + Value* newEntry = nullptr; + unsigned oldTableSize = m_tableSize; + ValueType* originalTable = m_table; + + ValueType* temporaryTable = allocateTable(oldTableSize); + for (unsigned i = 0; i < oldTableSize; i++) { + if (&m_table[i] == entry) + newEntry = &temporaryTable[i]; + if (isEmptyOrDeletedBucket(m_table[i])) { + DCHECK_NE(&m_table[i], entry); + if (Traits::emptyValueIsZero) { + memset(&temporaryTable[i], 0, sizeof(ValueType)); + } else { + initializeBucket(temporaryTable[i]); + } + } else { + Mover<ValueType, Allocator, + Traits::template NeedsToForbidGCOnMove<>::value>:: + move(std::move(m_table[i]), temporaryTable[i]); + } + } + m_table = temporaryTable; + + if (Traits::emptyValueIsZero) { + 
memset(originalTable, 0, newTableSize * sizeof(ValueType)); + } else { + for (unsigned i = 0; i < newTableSize; i++) + initializeBucket(originalTable[i]); + } + newEntry = rehashTo(originalTable, newTableSize, newEntry); + + enterAccessForbiddenScope(); + deleteAllBucketsAndDeallocate(temporaryTable, oldTableSize); + leaveAccessForbiddenScope(); + + return newEntry; +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +Value* +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry) { + unsigned oldTableSize = m_tableSize; + ValueType* oldTable = m_table; + +#if DUMP_HASHTABLE_STATS + if (oldTableSize != 0) + atomicIncrement(&HashTableStats::instance().numRehashes); +#endif + +#if DUMP_HASHTABLE_STATS_PER_TABLE + if (oldTableSize != 0) + ++m_stats->numRehashes; +#endif + + m_table = newTable; + m_tableSize = newTableSize; + + Value* newEntry = nullptr; + for (unsigned i = 0; i != oldTableSize; ++i) { + if (isEmptyOrDeletedBucket(oldTable[i])) { + DCHECK_NE(&oldTable[i], entry); + continue; + } + Value* reinsertedEntry = reinsert(std::move(oldTable[i])); + if (&oldTable[i] == entry) { + DCHECK(!newEntry); + newEntry = reinsertedEntry; + } + } + + m_deletedCount = 0; + +#if DUMP_HASHTABLE_STATS_PER_TABLE + if (!m_stats) + m_stats = HashTableStatsPtr<Allocator>::create(); +#endif + + return newEntry; +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +Value* +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + rehash(unsigned newTableSize, Value* entry) { + unsigned oldTableSize = m_tableSize; + ValueType* oldTable = m_table; + +#if DUMP_HASHTABLE_STATS + if (oldTableSize != 0) + atomicIncrement(&HashTableStats::instance().numRehashes); 
+#endif + +#if DUMP_HASHTABLE_STATS_PER_TABLE + if (oldTableSize != 0) + ++m_stats->numRehashes; +#endif + + // The Allocator::isGarbageCollected check is not needed. The check is just + // a static hint for a compiler to indicate that Base::expandBuffer returns + // false if Allocator is a PartitionAllocator. + if (Allocator::isGarbageCollected && newTableSize > oldTableSize) { + bool success; + Value* newEntry = expandBuffer(newTableSize, entry, success); + if (success) + return newEntry; + } + + ValueType* newTable = allocateTable(newTableSize); + Value* newEntry = rehashTo(newTable, newTableSize, entry); + + enterAccessForbiddenScope(); + deleteAllBucketsAndDeallocate(oldTable, oldTableSize); + leaveAccessForbiddenScope(); + + return newEntry; +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +void HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::clear() { + registerModification(); + if (!m_table) + return; + + enterAccessForbiddenScope(); + deleteAllBucketsAndDeallocate(m_table, m_tableSize); + leaveAccessForbiddenScope(); + m_table = nullptr; + m_tableSize = 0; + m_keyCount = 0; +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + HashTable(const HashTable& other) + : m_table(nullptr), + m_tableSize(0), + m_keyCount(0), + m_deletedCount(0), + m_queueFlag(false) +#if DCHECK_IS_ON() + , + m_accessForbidden(false), + m_modifications(0) +#endif +#if DUMP_HASHTABLE_STATS_PER_TABLE + , + m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats)) +#endif +{ + if (other.size()) + reserveCapacityForSize(other.size()); + // Copy the hash table the dumb way, by adding each element to the new + // table. 
It might be more efficient to copy the table slots, but it's not + // clear that efficiency is needed. + for (const auto& element : other) + add(element); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: + HashTable(HashTable&& other) + : m_table(nullptr), + m_tableSize(0), + m_keyCount(0), + m_deletedCount(0), + m_queueFlag(false) +#if DCHECK_IS_ON() + , + m_accessForbidden(false), + m_modifications(0) +#endif +#if DUMP_HASHTABLE_STATS_PER_TABLE + , + m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats)) +#endif +{ + swap(other); +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +void HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::swap(HashTable& other) { + DCHECK(!accessForbidden()); + std::swap(m_table, other.m_table); + std::swap(m_tableSize, other.m_tableSize); + std::swap(m_keyCount, other.m_keyCount); + // std::swap does not work for bit fields. 
+ unsigned deleted = m_deletedCount; + m_deletedCount = other.m_deletedCount; + other.m_deletedCount = deleted; + DCHECK(!m_queueFlag); + DCHECK(!other.m_queueFlag); + +#if DCHECK_IS_ON() + std::swap(m_modifications, other.m_modifications); +#endif + +#if DUMP_HASHTABLE_STATS_PER_TABLE + HashTableStatsPtr<Allocator>::swap(m_stats, other.m_stats); +#endif +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: +operator=(const HashTable& other) { + HashTable tmp(other); + swap(tmp); + return *this; +} + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& +HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: +operator=(HashTable&& other) { + swap(other); + return *this; +} + +template <WeakHandlingFlag weakHandlingFlag, + typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +struct WeakProcessingHashTableHelper; + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, + Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> { + STATIC_ONLY(WeakProcessingHashTableHelper); + static void process(typename Allocator::Visitor* visitor, void* closure) {} + static void ephemeronIteration(typename Allocator::Visitor* visitor, + void* closure) {} + static void ephemeronIterationDone(typename Allocator::Visitor* visitor, + void* closure) 
{} +}; + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +struct WeakProcessingHashTableHelper<WeakHandlingInCollections, + Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator> { + STATIC_ONLY(WeakProcessingHashTableHelper); + + using HashTableType = HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>; + using ValueType = typename HashTableType::ValueType; + + // Used for purely weak and for weak-and-strong tables (ephemerons). + static void process(typename Allocator::Visitor* visitor, void* closure) { + HashTableType* table = reinterpret_cast<HashTableType*>(closure); + if (!table->m_table) + return; + // Now perform weak processing (this is a no-op if the backing was + // accessible through an iterator and was already marked strongly). + for (ValueType* element = table->m_table + table->m_tableSize - 1; + element >= table->m_table; element--) { + if (!HashTableType::isEmptyOrDeletedBucket(*element)) { + // At this stage calling trace can make no difference + // (everything is already traced), but we use the return value + // to remove things from the collection. + + // FIXME: This should be rewritten so that this can check if the + // element is dead without calling trace, which is semantically + // not correct to be called in weak processing stage. + if (TraceInCollectionTrait<WeakHandlingInCollections, + WeakPointersActWeak, ValueType, + Traits>::trace(visitor, *element)) { + table->registerModification(); + HashTableType::deleteBucket(*element); // Also calls the destructor. + table->m_deletedCount++; + table->m_keyCount--; + // We don't rehash the backing until the next add or delete, + // because that would cause allocation during GC. + } + } + } + } + + // Called repeatedly for tables that have both weak and strong pointers. 
+ static void ephemeronIteration(typename Allocator::Visitor* visitor, + void* closure) { + HashTableType* table = reinterpret_cast<HashTableType*>(closure); + DCHECK(table->m_table); + // Check the hash table for elements that we now know will not be + // removed by weak processing. Those elements need to have their strong + // pointers traced. + for (ValueType* element = table->m_table + table->m_tableSize - 1; + element >= table->m_table; element--) { + if (!HashTableType::isEmptyOrDeletedBucket(*element)) + TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, + ValueType, Traits>::trace(visitor, *element); + } + } + + // Called when the ephemeron iteration is done and before running the per + // thread weak processing. It is guaranteed to be called before any thread + // is resumed. + static void ephemeronIterationDone(typename Allocator::Visitor* visitor, + void* closure) { + HashTableType* table = reinterpret_cast<HashTableType*>(closure); +#if DCHECK_IS_ON() + DCHECK(Allocator::weakTableRegistered(visitor, table)); +#endif + table->clearEnqueued(); + } +}; + +template <typename Key, + typename Value, + typename Extractor, + typename HashFunctions, + typename Traits, + typename KeyTraits, + typename Allocator> +template <typename VisitorDispatcher> +void HashTable<Key, + Value, + Extractor, + HashFunctions, + Traits, + KeyTraits, + Allocator>::trace(VisitorDispatcher visitor) { +#if DUMP_HASHTABLE_STATS_PER_TABLE + Allocator::markNoTracing(visitor, m_stats); +#endif + + // If someone else already marked the backing and queued up the trace and/or + // weak callback then we are done. This optimization does not happen for + // ListHashSet since its iterator does not point at the backing. + if (!m_table || Allocator::isHeapObjectAlive(m_table)) + return; + + // Normally, we mark the backing store without performing trace. This means + // it is marked live, but the pointers inside it are not marked. Instead we + // will mark the pointers below. 
However, for backing stores that contain + // weak pointers the handling is rather different. We don't mark the + // backing store here, so the marking GC will leave the backing unmarked. If + // the backing is found in any other way than through its HashTable (ie from + // an iterator) then the mark bit will be set and the pointers will be + // marked strongly, avoiding problems with iterating over things that + // disappear due to weak processing while we are iterating over them. We + // register the backing store pointer for delayed marking which will take + // place after we know if the backing is reachable from elsewhere. We also + // register a weakProcessing callback which will perform weak processing if + // needed. + if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) { + Allocator::markNoTracing(visitor, m_table); + } else { + Allocator::registerDelayedMarkNoTracing(visitor, m_table); + // Since we're delaying marking this HashTable, it is possible that the + // registerWeakMembers is called multiple times (in rare + // cases). However, it shouldn't cause any issue. + Allocator::registerWeakMembers( + visitor, this, + WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, + Extractor, HashFunctions, Traits, + KeyTraits, Allocator>::process); + } + // If the backing store will be moved by sweep compaction, register the + // table reference pointing to the backing store object, so that the + // reference is updated upon object relocation. A no-op if not enabled + // by the visitor. 
+ Allocator::registerBackingStoreReference(visitor, &m_table); + if (!IsTraceableInCollectionTrait<Traits>::value) + return; + if (Traits::weakHandlingFlag == WeakHandlingInCollections) { + // If we have both strong and weak pointers in the collection then + // we queue up the collection for fixed point iteration a la + // Ephemerons: + // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also + // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak +#if DCHECK_IS_ON() + DCHECK(!enqueued() || Allocator::weakTableRegistered(visitor, this)); +#endif + if (!enqueued()) { + Allocator::registerWeakTable( + visitor, this, + WeakProcessingHashTableHelper< + Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, + Traits, KeyTraits, Allocator>::ephemeronIteration, + WeakProcessingHashTableHelper< + Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, + Traits, KeyTraits, Allocator>::ephemeronIterationDone); + setEnqueued(); + } + // We don't need to trace the elements here, since registering as a + // weak table above will cause them to be traced (perhaps several + // times). It's better to wait until everything else is traced + // before tracing the elements for the first time; this may reduce + // (by one) the number of iterations needed to get to a fixed point. 
+ return; + } + for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; + element--) { + if (!isEmptyOrDeletedBucket(*element)) + Allocator::template trace<VisitorDispatcher, ValueType, Traits>(visitor, + *element); + } +} + +// iterator adapters + +template <typename HashTableType, typename Traits> +struct HashTableConstIteratorAdapter { + STACK_ALLOCATED(); + HashTableConstIteratorAdapter() {} + HashTableConstIteratorAdapter( + const typename HashTableType::const_iterator& impl) + : m_impl(impl) {} + typedef typename Traits::IteratorConstGetType GetType; + typedef + typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType; + + GetType get() const { + return const_cast<GetType>(SourceGetType(m_impl.get())); + } + typename Traits::IteratorConstReferenceType operator*() const { + return Traits::getToReferenceConstConversion(get()); + } + GetType operator->() const { return get(); } + + HashTableConstIteratorAdapter& operator++() { + ++m_impl; + return *this; + } + // postfix ++ intentionally omitted + + typename HashTableType::const_iterator m_impl; +}; + +template <typename HashTable, typename Traits> +std::ostream& operator<<( + std::ostream& stream, + const HashTableConstIteratorAdapter<HashTable, Traits>& iterator) { + return stream << iterator.m_impl; +} + +template <typename HashTableType, typename Traits> +struct HashTableIteratorAdapter { + STACK_ALLOCATED(); + typedef typename Traits::IteratorGetType GetType; + typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType; + + HashTableIteratorAdapter() {} + HashTableIteratorAdapter(const typename HashTableType::iterator& impl) + : m_impl(impl) {} + + GetType get() const { + return const_cast<GetType>(SourceGetType(m_impl.get())); + } + typename Traits::IteratorReferenceType operator*() const { + return Traits::getToReferenceConversion(get()); + } + GetType operator->() const { return get(); } + + HashTableIteratorAdapter& operator++() { + ++m_impl; + return 
*this; + } + // postfix ++ intentionally omitted + + operator HashTableConstIteratorAdapter<HashTableType, Traits>() { + typename HashTableType::const_iterator i = m_impl; + return i; + } + + typename HashTableType::iterator m_impl; +}; + +template <typename HashTable, typename Traits> +std::ostream& operator<<( + std::ostream& stream, + const HashTableIteratorAdapter<HashTable, Traits>& iterator) { + return stream << iterator.m_impl; +} + +template <typename T, typename U> +inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, + const HashTableConstIteratorAdapter<T, U>& b) { + return a.m_impl == b.m_impl; +} + +template <typename T, typename U> +inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, + const HashTableConstIteratorAdapter<T, U>& b) { + return a.m_impl != b.m_impl; +} + +template <typename T, typename U> +inline bool operator==(const HashTableIteratorAdapter<T, U>& a, + const HashTableIteratorAdapter<T, U>& b) { + return a.m_impl == b.m_impl; +} + +template <typename T, typename U> +inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, + const HashTableIteratorAdapter<T, U>& b) { + return a.m_impl != b.m_impl; +} + +// All 4 combinations of ==, != and Const,non const. 
+template <typename T, typename U> +inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, + const HashTableIteratorAdapter<T, U>& b) { + return a.m_impl == b.m_impl; +} + +template <typename T, typename U> +inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, + const HashTableIteratorAdapter<T, U>& b) { + return a.m_impl != b.m_impl; +} + +template <typename T, typename U> +inline bool operator==(const HashTableIteratorAdapter<T, U>& a, + const HashTableConstIteratorAdapter<T, U>& b) { + return a.m_impl == b.m_impl; +} + +template <typename T, typename U> +inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, + const HashTableConstIteratorAdapter<T, U>& b) { + return a.m_impl != b.m_impl; +} + +template <typename Collection1, typename Collection2> +inline void removeAll(Collection1& collection, const Collection2& toBeRemoved) { + if (collection.isEmpty() || toBeRemoved.isEmpty()) + return; + typedef typename Collection2::const_iterator CollectionIterator; + CollectionIterator end(toBeRemoved.end()); + for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it) + collection.erase(*it); +} + +} // namespace WTF + +#include "wtf/HashIterators.h" + +#endif // WTF_HashTable_h
diff --git a/third_party/WebKit/Source/platform/wtf/HashTraits.h b/third_party/WebKit/Source/platform/wtf/HashTraits.h new file mode 100644 index 0000000..f62b5edb --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/HashTraits.h
@@ -0,0 +1,429 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights + * reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_HashTraits_h +#define WTF_HashTraits_h + +#include "platform/wtf/Forward.h" +#include "platform/wtf/HashFunctions.h" +#include "platform/wtf/HashTableDeletedValueType.h" +#include "platform/wtf/StdLibExtras.h" +#include "platform/wtf/TypeTraits.h" +#include <limits> +#include <memory> +#include <string.h> // For memset. +#include <type_traits> +#include <utility> + +namespace WTF { + +template <bool isInteger, typename T> +struct GenericHashTraitsBase; +template <typename T> +struct HashTraits; + +enum ShouldWeakPointersBeMarkedStrongly { + WeakPointersActStrong, + WeakPointersActWeak +}; + +template <typename T> +struct GenericHashTraitsBase<false, T> { + // The emptyValueIsZero flag is used to optimize allocation of empty hash + // tables with zeroed memory. + static const bool emptyValueIsZero = false; + + // The hasIsEmptyValueFunction flag allows the hash table to automatically + // generate code to check for the empty value when it can be done with the + // equality operator, but allows custom functions for cases like String that + // need them. 
+ static const bool hasIsEmptyValueFunction = false; + +// The starting table size. Can be overridden when we know beforehand that a +// hash table will have at least N entries. +#if defined(MEMORY_SANITIZER_INITIAL_SIZE) + static const unsigned minimumTableSize = 1; +#else + static const unsigned minimumTableSize = 8; +#endif + + // When a hash table backing store is traced, its elements will be + // traced if their class type has a trace method. However, weak-referenced + // elements should not be traced then, but handled by the weak processing + // phase that follows. + template <typename U = void> + struct IsTraceableInCollection { + static const bool value = IsTraceable<T>::value && !IsWeak<T>::value; + }; + + // The NeedsToForbidGCOnMove flag is used to make the hash table move + // operations safe when GC is enabled: if a move constructor invokes + // an allocation triggering the GC then it should be invoked within GC + // forbidden scope. + template <typename U = void> + struct NeedsToForbidGCOnMove { + // TODO(yutak): Consider using of std:::is_trivially_move_constructible + // when it is accessible. + static const bool value = !std::is_pod<T>::value; + }; + + static const WeakHandlingFlag weakHandlingFlag = + IsWeak<T>::value ? WeakHandlingInCollections + : NoWeakHandlingInCollections; +}; + +// Default integer traits disallow both 0 and -1 as keys (max value instead of +// -1 for unsigned). 
+template <typename T> +struct GenericHashTraitsBase<true, T> : GenericHashTraitsBase<false, T> { + static const bool emptyValueIsZero = true; + static void constructDeletedValue(T& slot, bool) { + slot = static_cast<T>(-1); + } + static bool isDeletedValue(T value) { return value == static_cast<T>(-1); } +}; + +template <typename T> +struct GenericHashTraits + : GenericHashTraitsBase<std::is_integral<T>::value, T> { + typedef T TraitType; + typedef T EmptyValueType; + + static T emptyValue() { return T(); } + + // Type for functions that do not take ownership, such as contains. + typedef const T& PeekInType; + typedef T* IteratorGetType; + typedef const T* IteratorConstGetType; + typedef T& IteratorReferenceType; + typedef const T& IteratorConstReferenceType; + static IteratorReferenceType getToReferenceConversion(IteratorGetType x) { + return *x; + } + static IteratorConstReferenceType getToReferenceConstConversion( + IteratorConstGetType x) { + return *x; + } + + template <typename IncomingValueType> + static void store(IncomingValueType&& value, T& storage) { + storage = std::forward<IncomingValueType>(value); + } + + // Type for return value of functions that do not transfer ownership, such + // as get. + // FIXME: We could change this type to const T& for better performance if we + // figured out a way to handle the return value from emptyValue, which is a + // temporary. 
+ typedef T PeekOutType; + static const T& peek(const T& value) { return value; } +}; + +template <typename T> +struct HashTraits : GenericHashTraits<T> {}; + +template <typename T> +struct FloatHashTraits : GenericHashTraits<T> { + static T emptyValue() { return std::numeric_limits<T>::infinity(); } + static void constructDeletedValue(T& slot, bool) { + slot = -std::numeric_limits<T>::infinity(); + } + static bool isDeletedValue(T value) { + return value == -std::numeric_limits<T>::infinity(); + } +}; + +template <> +struct HashTraits<float> : FloatHashTraits<float> {}; +template <> +struct HashTraits<double> : FloatHashTraits<double> {}; + +// Default unsigned traits disallow both 0 and max as keys -- use these traits +// to allow zero and disallow max - 1. +template <typename T> +struct UnsignedWithZeroKeyHashTraits : GenericHashTraits<T> { + static const bool emptyValueIsZero = false; + static T emptyValue() { return std::numeric_limits<T>::max(); } + static void constructDeletedValue(T& slot, bool) { + slot = std::numeric_limits<T>::max() - 1; + } + static bool isDeletedValue(T value) { + return value == std::numeric_limits<T>::max() - 1; + } +}; + +template <typename P> +struct HashTraits<P*> : GenericHashTraits<P*> { + static const bool emptyValueIsZero = true; + static void constructDeletedValue(P*& slot, bool) { + slot = reinterpret_cast<P*>(-1); + } + static bool isDeletedValue(P* value) { + return value == reinterpret_cast<P*>(-1); + } +}; + +template <typename T> +struct SimpleClassHashTraits : GenericHashTraits<T> { + static const bool emptyValueIsZero = true; + template <typename U = void> + struct NeedsToForbidGCOnMove { + static const bool value = false; + }; + static void constructDeletedValue(T& slot, bool) { + new (NotNull, &slot) T(HashTableDeletedValue); + } + static bool isDeletedValue(const T& value) { + return value.isHashTableDeletedValue(); + } +}; + +template <typename P> +struct HashTraits<RefPtr<P>> : SimpleClassHashTraits<RefPtr<P>> { 
+ typedef std::nullptr_t EmptyValueType; + static EmptyValueType emptyValue() { return nullptr; } + + static const bool hasIsEmptyValueFunction = true; + static bool isEmptyValue(const RefPtr<P>& value) { return !value; } + + typedef RefPtrValuePeeker<P> PeekInType; + typedef RefPtr<P>* IteratorGetType; + typedef const RefPtr<P>* IteratorConstGetType; + typedef RefPtr<P>& IteratorReferenceType; + typedef const RefPtr<P>& IteratorConstReferenceType; + static IteratorReferenceType getToReferenceConversion(IteratorGetType x) { + return *x; + } + static IteratorConstReferenceType getToReferenceConstConversion( + IteratorConstGetType x) { + return *x; + } + + static void store(PassRefPtr<P> value, RefPtr<P>& storage) { + storage = std::move(value); + } + + typedef P* PeekOutType; + static PeekOutType peek(const RefPtr<P>& value) { return value.get(); } + static PeekOutType peek(std::nullptr_t) { return 0; } +}; + +template <typename T> +struct HashTraits<std::unique_ptr<T>> + : SimpleClassHashTraits<std::unique_ptr<T>> { + using EmptyValueType = std::nullptr_t; + static EmptyValueType emptyValue() { return nullptr; } + + static const bool hasIsEmptyValueFunction = true; + static bool isEmptyValue(const std::unique_ptr<T>& value) { return !value; } + + using PeekInType = T*; + + static void store(std::unique_ptr<T>&& value, std::unique_ptr<T>& storage) { + storage = std::move(value); + } + + using PeekOutType = T*; + static PeekOutType peek(const std::unique_ptr<T>& value) { + return value.get(); + } + static PeekOutType peek(std::nullptr_t) { return nullptr; } + + static void constructDeletedValue(std::unique_ptr<T>& slot, bool) { + // Dirty trick: implant an invalid pointer to unique_ptr. Destructor isn't + // called for deleted buckets, so this is okay. 
+ new (NotNull, &slot) std::unique_ptr<T>(reinterpret_cast<T*>(1u)); + } + static bool isDeletedValue(const std::unique_ptr<T>& value) { + return value.get() == reinterpret_cast<T*>(1u); + } +}; + +template <> +struct HashTraits<String> : SimpleClassHashTraits<String> { + static const bool hasIsEmptyValueFunction = true; + static bool isEmptyValue(const String&); +}; + +// This struct template is an implementation detail of the +// isHashTraitsEmptyValue function, which selects either the emptyValue function +// or the isEmptyValue function to check for empty values. +template <typename Traits, bool hasEmptyValueFunction> +struct HashTraitsEmptyValueChecker; +template <typename Traits> +struct HashTraitsEmptyValueChecker<Traits, true> { + template <typename T> + static bool isEmptyValue(const T& value) { + return Traits::isEmptyValue(value); + } +}; +template <typename Traits> +struct HashTraitsEmptyValueChecker<Traits, false> { + template <typename T> + static bool isEmptyValue(const T& value) { + return value == Traits::emptyValue(); + } +}; +template <typename Traits, typename T> +inline bool isHashTraitsEmptyValue(const T& value) { + return HashTraitsEmptyValueChecker< + Traits, Traits::hasIsEmptyValueFunction>::isEmptyValue(value); +} + +template <typename FirstTraitsArg, typename SecondTraitsArg> +struct PairHashTraits + : GenericHashTraits<std::pair<typename FirstTraitsArg::TraitType, + typename SecondTraitsArg::TraitType>> { + typedef FirstTraitsArg FirstTraits; + typedef SecondTraitsArg SecondTraits; + typedef std::pair<typename FirstTraits::TraitType, + typename SecondTraits::TraitType> + TraitType; + typedef std::pair<typename FirstTraits::EmptyValueType, + typename SecondTraits::EmptyValueType> + EmptyValueType; + + static const bool emptyValueIsZero = + FirstTraits::emptyValueIsZero && SecondTraits::emptyValueIsZero; + static EmptyValueType emptyValue() { + return std::make_pair(FirstTraits::emptyValue(), + SecondTraits::emptyValue()); + } + + static 
const bool hasIsEmptyValueFunction = + FirstTraits::hasIsEmptyValueFunction || + SecondTraits::hasIsEmptyValueFunction; + static bool isEmptyValue(const TraitType& value) { + return isHashTraitsEmptyValue<FirstTraits>(value.first) && + isHashTraitsEmptyValue<SecondTraits>(value.second); + } + + static const unsigned minimumTableSize = FirstTraits::minimumTableSize; + + static void constructDeletedValue(TraitType& slot, bool zeroValue) { + FirstTraits::constructDeletedValue(slot.first, zeroValue); + // For GC collections the memory for the backing is zeroed when it is + // allocated, and the constructors may take advantage of that, + // especially if a GC occurs during insertion of an entry into the + // table. This slot is being marked deleted, but If the slot is reused + // at a later point, the same assumptions around memory zeroing must + // hold as they did at the initial allocation. Therefore we zero the + // value part of the slot here for GC collections. + if (zeroValue) + memset(reinterpret_cast<void*>(&slot.second), 0, sizeof(slot.second)); + } + static bool isDeletedValue(const TraitType& value) { + return FirstTraits::isDeletedValue(value.first); + } +}; + +template <typename First, typename Second> +struct HashTraits<std::pair<First, Second>> + : public PairHashTraits<HashTraits<First>, HashTraits<Second>> {}; + +template <typename KeyTypeArg, typename ValueTypeArg> +struct KeyValuePair { + typedef KeyTypeArg KeyType; + + template <typename IncomingKeyType, typename IncomingValueType> + KeyValuePair(IncomingKeyType&& key, IncomingValueType&& value) + : key(std::forward<IncomingKeyType>(key)), + value(std::forward<IncomingValueType>(value)) {} + + template <typename OtherKeyType, typename OtherValueType> + KeyValuePair(KeyValuePair<OtherKeyType, OtherValueType>&& other) + : key(std::move(other.key)), value(std::move(other.value)) {} + + KeyTypeArg key; + ValueTypeArg value; +}; + +template <typename KeyTraitsArg, typename ValueTraitsArg> +struct 
KeyValuePairHashTraits + : GenericHashTraits<KeyValuePair<typename KeyTraitsArg::TraitType, + typename ValueTraitsArg::TraitType>> { + typedef KeyTraitsArg KeyTraits; + typedef ValueTraitsArg ValueTraits; + typedef KeyValuePair<typename KeyTraits::TraitType, + typename ValueTraits::TraitType> + TraitType; + typedef KeyValuePair<typename KeyTraits::EmptyValueType, + typename ValueTraits::EmptyValueType> + EmptyValueType; + + static const bool emptyValueIsZero = + KeyTraits::emptyValueIsZero && ValueTraits::emptyValueIsZero; + static EmptyValueType emptyValue() { + return KeyValuePair<typename KeyTraits::EmptyValueType, + typename ValueTraits::EmptyValueType>( + KeyTraits::emptyValue(), ValueTraits::emptyValue()); + } + + template <typename U = void> + struct IsTraceableInCollection { + static const bool value = IsTraceableInCollectionTrait<KeyTraits>::value || + IsTraceableInCollectionTrait<ValueTraits>::value; + }; + + template <typename U = void> + struct NeedsToForbidGCOnMove { + static const bool value = + KeyTraits::template NeedsToForbidGCOnMove<>::value || + ValueTraits::template NeedsToForbidGCOnMove<>::value; + }; + + static const WeakHandlingFlag weakHandlingFlag = + (KeyTraits::weakHandlingFlag == WeakHandlingInCollections || + ValueTraits::weakHandlingFlag == WeakHandlingInCollections) + ? WeakHandlingInCollections + : NoWeakHandlingInCollections; + + static const unsigned minimumTableSize = KeyTraits::minimumTableSize; + + static void constructDeletedValue(TraitType& slot, bool zeroValue) { + KeyTraits::constructDeletedValue(slot.key, zeroValue); + // See similar code in this file for why we need to do this. 
+ if (zeroValue) + memset(reinterpret_cast<void*>(&slot.value), 0, sizeof(slot.value)); + } + static bool isDeletedValue(const TraitType& value) { + return KeyTraits::isDeletedValue(value.key); + } +}; + +template <typename Key, typename Value> +struct HashTraits<KeyValuePair<Key, Value>> + : public KeyValuePairHashTraits<HashTraits<Key>, HashTraits<Value>> {}; + +template <typename T> +struct NullableHashTraits : public HashTraits<T> { + static const bool emptyValueIsZero = false; + static T emptyValue() { return reinterpret_cast<T>(1); } +}; + +} // namespace WTF + +using WTF::HashTraits; +using WTF::PairHashTraits; +using WTF::NullableHashTraits; +using WTF::SimpleClassHashTraits; + +#endif // WTF_HashTraits_h
diff --git a/third_party/WebKit/Source/platform/wtf/LinkedHashSet.h b/third_party/WebKit/Source/platform/wtf/LinkedHashSet.h new file mode 100644 index 0000000..e08fdde --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/LinkedHashSet.h
@@ -0,0 +1,938 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights + * reserved. + * Copyright (C) 2011, Benjamin Poulain <ikipou@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_LinkedHashSet_h +#define WTF_LinkedHashSet_h + +#include "platform/wtf/AddressSanitizer.h" +#include "platform/wtf/HashSet.h" +#include "platform/wtf/allocator/PartitionAllocator.h" + +namespace WTF { + +// LinkedHashSet: Just like HashSet, this class provides a Set +// interface - a collection of unique objects with O(1) insertion, +// removal and test for containership. However, it also has an +// order - iterating it will always give back values in the order +// in which they are added. + +// Unlike ListHashSet, but like most WTF collections, iteration is NOT safe +// against mutation of the LinkedHashSet. 
+ +template <typename Value, + typename HashFunctions, + typename HashTraits, + typename Allocator> +class LinkedHashSet; + +template <typename LinkedHashSet> +class LinkedHashSetIterator; +template <typename LinkedHashSet> +class LinkedHashSetConstIterator; +template <typename LinkedHashSet> +class LinkedHashSetReverseIterator; +template <typename LinkedHashSet> +class LinkedHashSetConstReverseIterator; + +template <typename Value, typename HashFunctions, typename Allocator> +struct LinkedHashSetTranslator; +template <typename Value, typename Allocator> +struct LinkedHashSetExtractor; +template <typename Value, typename ValueTraits, typename Allocator> +struct LinkedHashSetTraits; + +class LinkedHashSetNodeBase { + DISALLOW_NEW(); + + public: + LinkedHashSetNodeBase() : m_prev(this), m_next(this) {} + + NO_SANITIZE_ADDRESS + void unlink() { + if (!m_next) + return; + DCHECK(m_prev); + DCHECK(m_next->m_prev == this); + DCHECK(m_prev->m_next == this); + m_next->m_prev = m_prev; + m_prev->m_next = m_next; + } + + ~LinkedHashSetNodeBase() { unlink(); } + + void insertBefore(LinkedHashSetNodeBase& other) { + other.m_next = this; + other.m_prev = m_prev; + m_prev->m_next = &other; + m_prev = &other; + DCHECK(other.m_next); + DCHECK(other.m_prev); + DCHECK(m_next); + DCHECK(m_prev); + } + + void insertAfter(LinkedHashSetNodeBase& other) { + other.m_prev = this; + other.m_next = m_next; + m_next->m_prev = &other; + m_next = &other; + DCHECK(other.m_next); + DCHECK(other.m_prev); + DCHECK(m_next); + DCHECK(m_prev); + } + + LinkedHashSetNodeBase(LinkedHashSetNodeBase* prev, + LinkedHashSetNodeBase* next) + : m_prev(prev), m_next(next) { + DCHECK((prev && next) || (!prev && !next)); + } + + LinkedHashSetNodeBase* m_prev; + LinkedHashSetNodeBase* m_next; + + protected: + // If we take a copy of a node we can't copy the next and prev pointers, + // since they point to something that does not point at us. This is used + // inside the shouldExpand() "if" in HashTable::add. 
+ LinkedHashSetNodeBase(const LinkedHashSetNodeBase& other) + : m_prev(0), m_next(0) {} + + LinkedHashSetNodeBase(LinkedHashSetNodeBase&& other) + : m_prev(other.m_prev), m_next(other.m_next) { + other.m_prev = nullptr; + other.m_next = nullptr; + if (m_next) { + m_prev->m_next = this; + m_next->m_prev = this; + } + } + + private: + // Should not be used. + LinkedHashSetNodeBase& operator=(const LinkedHashSetNodeBase& other); +}; + +template <typename ValueArg, typename Allocator> +class LinkedHashSetNode : public LinkedHashSetNodeBase { + DISALLOW_NEW_EXCEPT_PLACEMENT_NEW(); + + public: + LinkedHashSetNode(const ValueArg& value, + LinkedHashSetNodeBase* prev, + LinkedHashSetNodeBase* next) + : LinkedHashSetNodeBase(prev, next), m_value(value) {} + + LinkedHashSetNode(LinkedHashSetNode&& other) + : LinkedHashSetNodeBase(std::move(other)), + m_value(std::move(other.m_value)) {} + + ValueArg m_value; + + private: + WTF_MAKE_NONCOPYABLE(LinkedHashSetNode); +}; + +template <typename ValueArg, + typename HashFunctions = typename DefaultHash<ValueArg>::Hash, + typename TraitsArg = HashTraits<ValueArg>, + typename Allocator = PartitionAllocator> +class LinkedHashSet { + USE_ALLOCATOR(LinkedHashSet, Allocator); + + private: + typedef ValueArg Value; + typedef TraitsArg Traits; + typedef LinkedHashSetNode<Value, Allocator> Node; + typedef LinkedHashSetNodeBase NodeBase; + typedef LinkedHashSetTranslator<Value, HashFunctions, Allocator> + NodeHashFunctions; + typedef LinkedHashSetTraits<Value, Traits, Allocator> NodeHashTraits; + + typedef HashTable<Node, + Node, + IdentityExtractor, + NodeHashFunctions, + NodeHashTraits, + NodeHashTraits, + Allocator> + ImplType; + + public: + typedef LinkedHashSetIterator<LinkedHashSet> iterator; + friend class LinkedHashSetIterator<LinkedHashSet>; + typedef LinkedHashSetConstIterator<LinkedHashSet> const_iterator; + friend class LinkedHashSetConstIterator<LinkedHashSet>; + + typedef LinkedHashSetReverseIterator<LinkedHashSet> 
reverse_iterator; + friend class LinkedHashSetReverseIterator<LinkedHashSet>; + typedef LinkedHashSetConstReverseIterator<LinkedHashSet> + const_reverse_iterator; + friend class LinkedHashSetConstReverseIterator<LinkedHashSet>; + + struct AddResult final { + STACK_ALLOCATED(); + AddResult(const typename ImplType::AddResult& hashTableAddResult) + : storedValue(&hashTableAddResult.storedValue->m_value), + isNewEntry(hashTableAddResult.isNewEntry) {} + + Value* storedValue; + bool isNewEntry; + }; + + typedef typename HashTraits<Value>::PeekInType ValuePeekInType; + + LinkedHashSet(); + LinkedHashSet(const LinkedHashSet&); + LinkedHashSet(LinkedHashSet&&); + LinkedHashSet& operator=(const LinkedHashSet&); + LinkedHashSet& operator=(LinkedHashSet&&); + + // Needs finalization. The anchor needs to unlink itself from the chain. + ~LinkedHashSet(); + + static void finalize(void* pointer) { + reinterpret_cast<LinkedHashSet*>(pointer)->~LinkedHashSet(); + } + void finalizeGarbageCollectedObject() { finalize(this); } + + void swap(LinkedHashSet&); + + unsigned size() const { return m_impl.size(); } + unsigned capacity() const { return m_impl.capacity(); } + bool isEmpty() const { return m_impl.isEmpty(); } + + iterator begin() { return makeIterator(firstNode()); } + iterator end() { return makeIterator(anchor()); } + const_iterator begin() const { return makeConstIterator(firstNode()); } + const_iterator end() const { return makeConstIterator(anchor()); } + + reverse_iterator rbegin() { return makeReverseIterator(lastNode()); } + reverse_iterator rend() { return makeReverseIterator(anchor()); } + const_reverse_iterator rbegin() const { + return makeConstReverseIterator(lastNode()); + } + const_reverse_iterator rend() const { + return makeConstReverseIterator(anchor()); + } + + Value& front(); + const Value& front() const; + void removeFirst(); + + Value& back(); + const Value& back() const; + void pop_back(); + + iterator find(ValuePeekInType); + const_iterator 
find(ValuePeekInType) const; + bool contains(ValuePeekInType) const; + + // An alternate version of find() that finds the object by hashing and + // comparing with some other type, to avoid the cost of type conversion. + // The HashTranslator interface is defined in HashSet. + template <typename HashTranslator, typename T> + iterator find(const T&); + template <typename HashTranslator, typename T> + const_iterator find(const T&) const; + template <typename HashTranslator, typename T> + bool contains(const T&) const; + + // The return value of insert is a pair of a pointer to the stored value, + // and a bool that is true if an new entry was added. + template <typename IncomingValueType> + AddResult insert(IncomingValueType&&); + + // Same as insert() except that the return value is an + // iterator. Useful in cases where it's needed to have the + // same return value as find() and where it's not possible to + // use a pointer to the storedValue. + template <typename IncomingValueType> + iterator addReturnIterator(IncomingValueType&&); + + // Add the value to the end of the collection. If the value was already in + // the list, it is moved to the end. + template <typename IncomingValueType> + AddResult appendOrMoveToLast(IncomingValueType&&); + + // Add the value to the beginning of the collection. If the value was already + // in the list, it is moved to the beginning. 
+ template <typename IncomingValueType> + AddResult prependOrMoveToFirst(IncomingValueType&&); + + template <typename IncomingValueType> + AddResult insertBefore(ValuePeekInType beforeValue, + IncomingValueType&& newValue); + template <typename IncomingValueType> + AddResult insertBefore(iterator it, IncomingValueType&& newValue) { + return m_impl.template add<NodeHashFunctions>( + std::forward<IncomingValueType>(newValue), it.getNode()); + } + + void erase(ValuePeekInType); + void erase(iterator); + void clear() { m_impl.clear(); } + template <typename Collection> + void removeAll(const Collection& other) { + WTF::removeAll(*this, other); + } + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher visitor) { + m_impl.trace(visitor); + // Should the underlying table be moved by GC, register a callback + // that fixes up the interior pointers that the (Heap)LinkedHashSet keeps. + if (m_impl.m_table) { + Allocator::registerBackingStoreCallback( + visitor, m_impl.m_table, moveBackingCallback, + reinterpret_cast<void*>(&m_anchor)); + } + } + + int64_t modifications() const { return m_impl.modifications(); } + void checkModifications(int64_t mods) const { + m_impl.checkModifications(mods); + } + + private: + Node* anchor() { return reinterpret_cast<Node*>(&m_anchor); } + const Node* anchor() const { + return reinterpret_cast<const Node*>(&m_anchor); + } + Node* firstNode() { return reinterpret_cast<Node*>(m_anchor.m_next); } + const Node* firstNode() const { + return reinterpret_cast<const Node*>(m_anchor.m_next); + } + Node* lastNode() { return reinterpret_cast<Node*>(m_anchor.m_prev); } + const Node* lastNode() const { + return reinterpret_cast<const Node*>(m_anchor.m_prev); + } + + iterator makeIterator(const Node* position) { + return iterator(position, this); + } + const_iterator makeConstIterator(const Node* position) const { + return const_iterator(position, this); + } + reverse_iterator makeReverseIterator(const Node* position) { + return 
reverse_iterator(position, this); + } + const_reverse_iterator makeConstReverseIterator(const Node* position) const { + return const_reverse_iterator(position, this); + } + + static void moveBackingCallback(void* anchor, + void* from, + void* to, + size_t size) { + // Note: the hash table move may have been overlapping; linearly scan the + // entire table and fixup interior pointers into the old region with + // correspondingly offset ones into the new. + size_t tableSize = size / sizeof(Node); + Node* table = reinterpret_cast<Node*>(to); + NodeBase* fromStart = reinterpret_cast<NodeBase*>(from); + NodeBase* fromEnd = + reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(from) + size); + for (Node* element = table + tableSize - 1; element >= table; element--) { + Node& node = *element; + if (ImplType::isEmptyOrDeletedBucket(node)) + continue; + if (node.m_next >= fromStart && node.m_next < fromEnd) { + size_t diff = reinterpret_cast<uintptr_t>(node.m_next) - + reinterpret_cast<uintptr_t>(from); + node.m_next = + reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff); + } + if (node.m_prev >= fromStart && node.m_prev < fromEnd) { + size_t diff = reinterpret_cast<uintptr_t>(node.m_prev) - + reinterpret_cast<uintptr_t>(from); + node.m_prev = + reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff); + } + } + NodeBase* anchorNode = reinterpret_cast<NodeBase*>(anchor); + if (anchorNode->m_next >= fromStart && anchorNode->m_next < fromEnd) { + size_t diff = reinterpret_cast<uintptr_t>(anchorNode->m_next) - + reinterpret_cast<uintptr_t>(from); + anchorNode->m_next = + reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff); + } + if (anchorNode->m_prev >= fromStart && anchorNode->m_prev < fromEnd) { + size_t diff = reinterpret_cast<uintptr_t>(anchorNode->m_prev) - + reinterpret_cast<uintptr_t>(from); + anchorNode->m_prev = + reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff); + } + } + + ImplType m_impl; + 
NodeBase m_anchor; +}; + +template <typename Value, typename HashFunctions, typename Allocator> +struct LinkedHashSetTranslator { + STATIC_ONLY(LinkedHashSetTranslator); + typedef LinkedHashSetNode<Value, Allocator> Node; + typedef LinkedHashSetNodeBase NodeBase; + typedef typename HashTraits<Value>::PeekInType ValuePeekInType; + static unsigned hash(const Node& node) { + return HashFunctions::hash(node.m_value); + } + static unsigned hash(const ValuePeekInType& key) { + return HashFunctions::hash(key); + } + static bool equal(const Node& a, const ValuePeekInType& b) { + return HashFunctions::equal(a.m_value, b); + } + static bool equal(const Node& a, const Node& b) { + return HashFunctions::equal(a.m_value, b.m_value); + } + template <typename IncomingValueType> + static void translate(Node& location, + IncomingValueType&& key, + NodeBase* anchor) { + anchor->insertBefore(location); + location.m_value = std::forward<IncomingValueType>(key); + } + + // Empty (or deleted) slots have the m_next pointer set to null, but we + // don't do anything to the other fields, which may contain junk. + // Therefore you can't compare a newly constructed empty value with a + // slot and get the right answer. + static const bool safeToCompareToEmptyOrDeleted = false; +}; + +template <typename Value, typename Allocator> +struct LinkedHashSetExtractor { + STATIC_ONLY(LinkedHashSetExtractor); + static const Value& extract(const LinkedHashSetNode<Value, Allocator>& node) { + return node.m_value; + } +}; + +template <typename Value, typename ValueTraitsArg, typename Allocator> +struct LinkedHashSetTraits + : public SimpleClassHashTraits<LinkedHashSetNode<Value, Allocator>> { + STATIC_ONLY(LinkedHashSetTraits); + typedef LinkedHashSetNode<Value, Allocator> Node; + typedef ValueTraitsArg ValueTraits; + + // The slot is empty when the m_next field is zero so it's safe to zero + // the backing. 
+ static const bool emptyValueIsZero = true; + + static const bool hasIsEmptyValueFunction = true; + static bool isEmptyValue(const Node& node) { return !node.m_next; } + + static const int deletedValue = -1; + + static void constructDeletedValue(Node& slot, bool) { + slot.m_next = reinterpret_cast<Node*>(deletedValue); + } + static bool isDeletedValue(const Node& slot) { + return slot.m_next == reinterpret_cast<Node*>(deletedValue); + } + + // Whether we need to trace and do weak processing depends on the traits of + // the type inside the node. + template <typename U = void> + struct IsTraceableInCollection { + STATIC_ONLY(IsTraceableInCollection); + static const bool value = + ValueTraits::template IsTraceableInCollection<>::value; + }; + static const WeakHandlingFlag weakHandlingFlag = + ValueTraits::weakHandlingFlag; +}; + +template <typename LinkedHashSetType> +class LinkedHashSetIterator { + DISALLOW_NEW(); + + private: + typedef typename LinkedHashSetType::Node Node; + typedef typename LinkedHashSetType::Traits Traits; + + typedef typename LinkedHashSetType::Value& ReferenceType; + typedef typename LinkedHashSetType::Value* PointerType; + + typedef LinkedHashSetConstIterator<LinkedHashSetType> const_iterator; + + Node* getNode() { return const_cast<Node*>(m_iterator.getNode()); } + + protected: + LinkedHashSetIterator(const Node* position, LinkedHashSetType* m_container) + : m_iterator(position, m_container) {} + + public: + // Default copy, assignment and destructor are OK. + + PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + LinkedHashSetIterator& operator++() { + ++m_iterator; + return *this; + } + LinkedHashSetIterator& operator--() { + --m_iterator; + return *this; + } + + // Postfix ++ and -- intentionally omitted. + + // Comparison. 
+ bool operator==(const LinkedHashSetIterator& other) const { + return m_iterator == other.m_iterator; + } + bool operator!=(const LinkedHashSetIterator& other) const { + return m_iterator != other.m_iterator; + } + + operator const_iterator() const { return m_iterator; } + + protected: + const_iterator m_iterator; + template <typename T, typename U, typename V, typename W> + friend class LinkedHashSet; +}; + +template <typename LinkedHashSetType> +class LinkedHashSetConstIterator { + DISALLOW_NEW(); + + private: + typedef typename LinkedHashSetType::Node Node; + typedef typename LinkedHashSetType::Traits Traits; + + typedef const typename LinkedHashSetType::Value& ReferenceType; + typedef const typename LinkedHashSetType::Value* PointerType; + + const Node* getNode() const { return static_cast<const Node*>(m_position); } + + protected: + LinkedHashSetConstIterator(const LinkedHashSetNodeBase* position, + const LinkedHashSetType* container) + : m_position(position) +#if DCHECK_IS_ON() + , + m_container(container), + m_containerModifications(container->modifications()) +#endif + { + } + + public: + PointerType get() const { + checkModifications(); + return &static_cast<const Node*>(m_position)->m_value; + } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + LinkedHashSetConstIterator& operator++() { + DCHECK(m_position); + checkModifications(); + m_position = m_position->m_next; + return *this; + } + + LinkedHashSetConstIterator& operator--() { + DCHECK(m_position); + checkModifications(); + m_position = m_position->m_prev; + return *this; + } + + // Postfix ++ and -- intentionally omitted. + + // Comparison. 
+ bool operator==(const LinkedHashSetConstIterator& other) const { + return m_position == other.m_position; + } + bool operator!=(const LinkedHashSetConstIterator& other) const { + return m_position != other.m_position; + } + + private: + const LinkedHashSetNodeBase* m_position; +#if DCHECK_IS_ON() + void checkModifications() const { + m_container->checkModifications(m_containerModifications); + } + const LinkedHashSetType* m_container; + int64_t m_containerModifications; +#else + void checkModifications() const {} +#endif + template <typename T, typename U, typename V, typename W> + friend class LinkedHashSet; + friend class LinkedHashSetIterator<LinkedHashSetType>; +}; + +template <typename LinkedHashSetType> +class LinkedHashSetReverseIterator + : public LinkedHashSetIterator<LinkedHashSetType> { + typedef LinkedHashSetIterator<LinkedHashSetType> Superclass; + typedef LinkedHashSetConstReverseIterator<LinkedHashSetType> + const_reverse_iterator; + typedef typename LinkedHashSetType::Node Node; + + protected: + LinkedHashSetReverseIterator(const Node* position, + LinkedHashSetType* container) + : Superclass(position, container) {} + + public: + LinkedHashSetReverseIterator& operator++() { + Superclass::operator--(); + return *this; + } + LinkedHashSetReverseIterator& operator--() { + Superclass::operator++(); + return *this; + } + + // Postfix ++ and -- intentionally omitted. 
+ + operator const_reverse_iterator() const { + return *reinterpret_cast<const_reverse_iterator*>(this); + } + + template <typename T, typename U, typename V, typename W> + friend class LinkedHashSet; +}; + +template <typename LinkedHashSetType> +class LinkedHashSetConstReverseIterator + : public LinkedHashSetConstIterator<LinkedHashSetType> { + typedef LinkedHashSetConstIterator<LinkedHashSetType> Superclass; + typedef typename LinkedHashSetType::Node Node; + + public: + LinkedHashSetConstReverseIterator(const Node* position, + const LinkedHashSetType* container) + : Superclass(position, container) {} + + LinkedHashSetConstReverseIterator& operator++() { + Superclass::operator--(); + return *this; + } + LinkedHashSetConstReverseIterator& operator--() { + Superclass::operator++(); + return *this; + } + + // Postfix ++ and -- intentionally omitted. + + template <typename T, typename U, typename V, typename W> + friend class LinkedHashSet; +}; + +template <typename T, typename U, typename V, typename Allocator> +inline LinkedHashSet<T, U, V, Allocator>::LinkedHashSet() { + static_assert( + Allocator::isGarbageCollected || + !IsPointerToGarbageCollectedType<T>::value, + "Cannot put raw pointers to garbage-collected classes into " + "an off-heap LinkedHashSet. 
Use HeapLinkedHashSet<Member<T>> instead."); +} + +template <typename T, typename U, typename V, typename W> +inline LinkedHashSet<T, U, V, W>::LinkedHashSet(const LinkedHashSet& other) + : m_anchor() { + const_iterator end = other.end(); + for (const_iterator it = other.begin(); it != end; ++it) + insert(*it); +} + +template <typename T, typename U, typename V, typename W> +inline LinkedHashSet<T, U, V, W>::LinkedHashSet(LinkedHashSet&& other) + : m_anchor() { + swap(other); +} + +template <typename T, typename U, typename V, typename W> +inline LinkedHashSet<T, U, V, W>& LinkedHashSet<T, U, V, W>::operator=( + const LinkedHashSet& other) { + LinkedHashSet tmp(other); + swap(tmp); + return *this; +} + +template <typename T, typename U, typename V, typename W> +inline LinkedHashSet<T, U, V, W>& LinkedHashSet<T, U, V, W>::operator=( + LinkedHashSet&& other) { + swap(other); + return *this; +} + +template <typename T, typename U, typename V, typename W> +inline void LinkedHashSet<T, U, V, W>::swap(LinkedHashSet& other) { + m_impl.swap(other.m_impl); + swapAnchor(m_anchor, other.m_anchor); +} + +template <typename T, typename U, typename V, typename Allocator> +inline LinkedHashSet<T, U, V, Allocator>::~LinkedHashSet() { + // The destructor of m_anchor will implicitly be called here, which will + // unlink the anchor from the collection. 
+} + +template <typename T, typename U, typename V, typename W> +inline T& LinkedHashSet<T, U, V, W>::front() { + DCHECK(!isEmpty()); + return firstNode()->m_value; +} + +template <typename T, typename U, typename V, typename W> +inline const T& LinkedHashSet<T, U, V, W>::front() const { + DCHECK(!isEmpty()); + return firstNode()->m_value; +} + +template <typename T, typename U, typename V, typename W> +inline void LinkedHashSet<T, U, V, W>::removeFirst() { + DCHECK(!isEmpty()); + m_impl.remove(static_cast<Node*>(m_anchor.m_next)); +} + +template <typename T, typename U, typename V, typename W> +inline T& LinkedHashSet<T, U, V, W>::back() { + DCHECK(!isEmpty()); + return lastNode()->m_value; +} + +template <typename T, typename U, typename V, typename W> +inline const T& LinkedHashSet<T, U, V, W>::back() const { + DCHECK(!isEmpty()); + return lastNode()->m_value; +} + +template <typename T, typename U, typename V, typename W> +inline void LinkedHashSet<T, U, V, W>::pop_back() { + DCHECK(!isEmpty()); + m_impl.remove(static_cast<Node*>(m_anchor.m_prev)); +} + +template <typename T, typename U, typename V, typename W> +inline typename LinkedHashSet<T, U, V, W>::iterator +LinkedHashSet<T, U, V, W>::find(ValuePeekInType value) { + LinkedHashSet::Node* node = + m_impl.template lookup<LinkedHashSet::NodeHashFunctions, ValuePeekInType>( + value); + if (!node) + return end(); + return makeIterator(node); +} + +template <typename T, typename U, typename V, typename W> +inline typename LinkedHashSet<T, U, V, W>::const_iterator +LinkedHashSet<T, U, V, W>::find(ValuePeekInType value) const { + const LinkedHashSet::Node* node = + m_impl.template lookup<LinkedHashSet::NodeHashFunctions, ValuePeekInType>( + value); + if (!node) + return end(); + return makeConstIterator(node); +} + +template <typename Translator> +struct LinkedHashSetTranslatorAdapter { + STATIC_ONLY(LinkedHashSetTranslatorAdapter); + template <typename T> + static unsigned hash(const T& key) { + return 
Translator::hash(key); + } + template <typename T, typename U> + static bool equal(const T& a, const U& b) { + return Translator::equal(a.m_value, b); + } +}; + +template <typename Value, typename U, typename V, typename W> +template <typename HashTranslator, typename T> +inline typename LinkedHashSet<Value, U, V, W>::iterator +LinkedHashSet<Value, U, V, W>::find(const T& value) { + typedef LinkedHashSetTranslatorAdapter<HashTranslator> TranslatedFunctions; + const LinkedHashSet::Node* node = + m_impl.template lookup<TranslatedFunctions, const T&>(value); + if (!node) + return end(); + return makeIterator(node); +} + +template <typename Value, typename U, typename V, typename W> +template <typename HashTranslator, typename T> +inline typename LinkedHashSet<Value, U, V, W>::const_iterator +LinkedHashSet<Value, U, V, W>::find(const T& value) const { + typedef LinkedHashSetTranslatorAdapter<HashTranslator> TranslatedFunctions; + const LinkedHashSet::Node* node = + m_impl.template lookup<TranslatedFunctions, const T&>(value); + if (!node) + return end(); + return makeConstIterator(node); +} + +template <typename Value, typename U, typename V, typename W> +template <typename HashTranslator, typename T> +inline bool LinkedHashSet<Value, U, V, W>::contains(const T& value) const { + return m_impl + .template contains<LinkedHashSetTranslatorAdapter<HashTranslator>>(value); +} + +template <typename T, typename U, typename V, typename W> +inline bool LinkedHashSet<T, U, V, W>::contains(ValuePeekInType value) const { + return m_impl.template contains<NodeHashFunctions>(value); +} + +template <typename Value, + typename HashFunctions, + typename Traits, + typename Allocator> +template <typename IncomingValueType> +typename LinkedHashSet<Value, HashFunctions, Traits, Allocator>::AddResult +LinkedHashSet<Value, HashFunctions, Traits, Allocator>::insert( + IncomingValueType&& value) { + return m_impl.template add<NodeHashFunctions>( + std::forward<IncomingValueType>(value), 
&m_anchor); +} + +template <typename T, typename U, typename V, typename W> +template <typename IncomingValueType> +typename LinkedHashSet<T, U, V, W>::iterator +LinkedHashSet<T, U, V, W>::addReturnIterator(IncomingValueType&& value) { + typename ImplType::AddResult result = m_impl.template add<NodeHashFunctions>( + std::forward<IncomingValueType>(value), &m_anchor); + return makeIterator(result.storedValue); +} + +template <typename T, typename U, typename V, typename W> +template <typename IncomingValueType> +typename LinkedHashSet<T, U, V, W>::AddResult +LinkedHashSet<T, U, V, W>::appendOrMoveToLast(IncomingValueType&& value) { + typename ImplType::AddResult result = m_impl.template add<NodeHashFunctions>( + std::forward<IncomingValueType>(value), &m_anchor); + Node* node = result.storedValue; + if (!result.isNewEntry) { + node->unlink(); + m_anchor.insertBefore(*node); + } + return result; +} + +template <typename T, typename U, typename V, typename W> +template <typename IncomingValueType> +typename LinkedHashSet<T, U, V, W>::AddResult +LinkedHashSet<T, U, V, W>::prependOrMoveToFirst(IncomingValueType&& value) { + typename ImplType::AddResult result = m_impl.template add<NodeHashFunctions>( + std::forward<IncomingValueType>(value), m_anchor.m_next); + Node* node = result.storedValue; + if (!result.isNewEntry) { + node->unlink(); + m_anchor.insertAfter(*node); + } + return result; +} + +template <typename T, typename U, typename V, typename W> +template <typename IncomingValueType> +typename LinkedHashSet<T, U, V, W>::AddResult +LinkedHashSet<T, U, V, W>::insertBefore(ValuePeekInType beforeValue, + IncomingValueType&& newValue) { + return insertBefore(find(beforeValue), + std::forward<IncomingValueType>(newValue)); +} + +template <typename T, typename U, typename V, typename W> +inline void LinkedHashSet<T, U, V, W>::erase(iterator it) { + if (it == end()) + return; + m_impl.remove(it.getNode()); +} + +template <typename T, typename U, typename V, typename W> 
+inline void LinkedHashSet<T, U, V, W>::erase(ValuePeekInType value) { + erase(find(value)); +} + +inline void swapAnchor(LinkedHashSetNodeBase& a, LinkedHashSetNodeBase& b) { + DCHECK(a.m_prev); + DCHECK(a.m_next); + DCHECK(b.m_prev); + DCHECK(b.m_next); + swap(a.m_prev, b.m_prev); + swap(a.m_next, b.m_next); + if (b.m_next == &a) { + DCHECK_EQ(b.m_prev, &a); + b.m_next = &b; + b.m_prev = &b; + } else { + b.m_next->m_prev = &b; + b.m_prev->m_next = &b; + } + if (a.m_next == &b) { + DCHECK_EQ(a.m_prev, &b); + a.m_next = &a; + a.m_prev = &a; + } else { + a.m_next->m_prev = &a; + a.m_prev->m_next = &a; + } +} + +inline void swap(LinkedHashSetNodeBase& a, LinkedHashSetNodeBase& b) { + DCHECK_NE(a.m_next, &a); + DCHECK_NE(b.m_next, &b); + swap(a.m_prev, b.m_prev); + swap(a.m_next, b.m_next); + if (b.m_next) { + b.m_next->m_prev = &b; + b.m_prev->m_next = &b; + } + if (a.m_next) { + a.m_next->m_prev = &a; + a.m_prev->m_next = &a; + } +} + +template <typename T, typename Allocator> +inline void swap(LinkedHashSetNode<T, Allocator>& a, + LinkedHashSetNode<T, Allocator>& b) { + typedef LinkedHashSetNodeBase Base; + // The key and value cannot be swapped atomically, and it would be + // wrong to have a GC when only one was swapped and the other still + // contained garbage (eg. from a previous use of the same slot). + // Therefore we forbid a GC until both the key and the value are + // swapped. + Allocator::enterGCForbiddenScope(); + swap(static_cast<Base&>(a), static_cast<Base&>(b)); + swap(a.m_value, b.m_value); + Allocator::leaveGCForbiddenScope(); +} + +} // namespace WTF + +using WTF::LinkedHashSet; + +#endif /* WTF_LinkedHashSet_h */
diff --git a/third_party/WebKit/Source/platform/wtf/ListHashSet.h b/third_party/WebKit/Source/platform/wtf/ListHashSet.h new file mode 100644 index 0000000..3e82725 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/ListHashSet.h
@@ -0,0 +1,1135 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights + * reserved. + * Copyright (C) 2011, Benjamin Poulain <ikipou@gmail.com> + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_ListHashSet_h +#define WTF_ListHashSet_h + +#include "platform/wtf/HashSet.h" +#include "platform/wtf/allocator/PartitionAllocator.h" +#include <memory> + +namespace WTF { + +// ListHashSet: Just like HashSet, this class provides a Set interface - a +// collection of unique objects with O(1) insertion, removal and test for +// containership. However, it also has an order - iterating it will always give +// back values in the order in which they are added. + +// Unlike iteration of most WTF Hash data structures, iteration is guaranteed +// safe against mutation of the ListHashSet, except for removal of the item +// currently pointed to by a given iterator. 
+ +template <typename Value, + size_t inlineCapacity, + typename HashFunctions, + typename Allocator> +class ListHashSet; + +template <typename Set> +class ListHashSetIterator; +template <typename Set> +class ListHashSetConstIterator; +template <typename Set> +class ListHashSetReverseIterator; +template <typename Set> +class ListHashSetConstReverseIterator; + +template <typename ValueArg> +class ListHashSetNodeBase; +template <typename ValueArg, typename Allocator> +class ListHashSetNode; +template <typename ValueArg, size_t inlineCapacity> +struct ListHashSetAllocator; + +template <typename HashArg> +struct ListHashSetNodeHashFunctions; +template <typename HashArg> +struct ListHashSetTranslator; + +// Note that for a ListHashSet you cannot specify the HashTraits as a template +// argument. It uses the default hash traits for the ValueArg type. +template <typename ValueArg, + size_t inlineCapacity = 256, + typename HashArg = typename DefaultHash<ValueArg>::Hash, + typename AllocatorArg = + ListHashSetAllocator<ValueArg, inlineCapacity>> +class ListHashSet + : public ConditionalDestructor< + ListHashSet<ValueArg, inlineCapacity, HashArg, AllocatorArg>, + AllocatorArg::isGarbageCollected> { + typedef AllocatorArg Allocator; + USE_ALLOCATOR(ListHashSet, Allocator); + + typedef ListHashSetNode<ValueArg, Allocator> Node; + typedef HashTraits<Node*> NodeTraits; + typedef ListHashSetNodeHashFunctions<HashArg> NodeHash; + typedef ListHashSetTranslator<HashArg> BaseTranslator; + + typedef HashTable<Node*, + Node*, + IdentityExtractor, + NodeHash, + NodeTraits, + NodeTraits, + typename Allocator::TableAllocator> + ImplType; + typedef HashTableIterator<Node*, + Node*, + IdentityExtractor, + NodeHash, + NodeTraits, + NodeTraits, + typename Allocator::TableAllocator> + ImplTypeIterator; + typedef HashTableConstIterator<Node*, + Node*, + IdentityExtractor, + NodeHash, + NodeTraits, + NodeTraits, + typename Allocator::TableAllocator> + ImplTypeConstIterator; + + typedef HashArg 
HashFunctions; + + public: + typedef ValueArg ValueType; + typedef HashTraits<ValueType> ValueTraits; + typedef typename ValueTraits::PeekInType ValuePeekInType; + + typedef ListHashSetIterator<ListHashSet> iterator; + typedef ListHashSetConstIterator<ListHashSet> const_iterator; + friend class ListHashSetIterator<ListHashSet>; + friend class ListHashSetConstIterator<ListHashSet>; + + typedef ListHashSetReverseIterator<ListHashSet> reverse_iterator; + typedef ListHashSetConstReverseIterator<ListHashSet> const_reverse_iterator; + friend class ListHashSetReverseIterator<ListHashSet>; + friend class ListHashSetConstReverseIterator<ListHashSet>; + + struct AddResult final { + STACK_ALLOCATED(); + friend class ListHashSet<ValueArg, inlineCapacity, HashArg, AllocatorArg>; + AddResult(Node* node, bool isNewEntry) + : storedValue(&node->m_value), isNewEntry(isNewEntry), m_node(node) {} + ValueType* storedValue; + bool isNewEntry; + + private: + Node* m_node; + }; + + ListHashSet(); + ListHashSet(const ListHashSet&); + ListHashSet(ListHashSet&&); + ListHashSet& operator=(const ListHashSet&); + ListHashSet& operator=(ListHashSet&&); + void finalize(); + + void swap(ListHashSet&); + + unsigned size() const { return m_impl.size(); } + unsigned capacity() const { return m_impl.capacity(); } + bool isEmpty() const { return m_impl.isEmpty(); } + + iterator begin() { return makeIterator(m_head); } + iterator end() { return makeIterator(0); } + const_iterator begin() const { return makeConstIterator(m_head); } + const_iterator end() const { return makeConstIterator(0); } + + reverse_iterator rbegin() { return makeReverseIterator(m_tail); } + reverse_iterator rend() { return makeReverseIterator(0); } + const_reverse_iterator rbegin() const { + return makeConstReverseIterator(m_tail); + } + const_reverse_iterator rend() const { return makeConstReverseIterator(0); } + + ValueType& front(); + const ValueType& front() const; + void removeFirst(); + + ValueType& back(); + const 
ValueType& back() const; + void pop_back(); + + iterator find(ValuePeekInType); + const_iterator find(ValuePeekInType) const; + bool contains(ValuePeekInType) const; + + // An alternate version of find() that finds the object by hashing and + // comparing with some other type, to avoid the cost of type conversion. + // The HashTranslator interface is defined in HashSet. + template <typename HashTranslator, typename T> + iterator find(const T&); + template <typename HashTranslator, typename T> + const_iterator find(const T&) const; + template <typename HashTranslator, typename T> + bool contains(const T&) const; + + // The return value of insert is a pair of a pointer to the stored value, and + // a bool that is true if an new entry was added. + template <typename IncomingValueType> + AddResult insert(IncomingValueType&&); + + // Same as insert() except that the return value is an iterator. Useful in + // cases where it's needed to have the same return value as find() and where + // it's not possible to use a pointer to the storedValue. + template <typename IncomingValueType> + iterator addReturnIterator(IncomingValueType&&); + + // Add the value to the end of the collection. If the value was already in + // the list, it is moved to the end. + template <typename IncomingValueType> + AddResult appendOrMoveToLast(IncomingValueType&&); + + // Add the value to the beginning of the collection. If the value was + // already in the list, it is moved to the beginning. 
+ template <typename IncomingValueType> + AddResult prependOrMoveToFirst(IncomingValueType&&); + + template <typename IncomingValueType> + AddResult insertBefore(ValuePeekInType beforeValue, + IncomingValueType&& newValue); + template <typename IncomingValueType> + AddResult insertBefore(iterator, IncomingValueType&&); + + void erase(ValuePeekInType value) { return erase(find(value)); } + void erase(iterator); + void clear(); + template <typename Collection> + void removeAll(const Collection& other) { + WTF::removeAll(*this, other); + } + + ValueType take(iterator); + ValueType take(ValuePeekInType); + ValueType takeFirst(); + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher); + + private: + void unlink(Node*); + void unlinkAndDelete(Node*); + void appendNode(Node*); + void prependNode(Node*); + void insertNodeBefore(Node* beforeNode, Node* newNode); + void deleteAllNodes(); + Allocator* getAllocator() const { return m_allocatorProvider.get(); } + void createAllocatorIfNeeded() { + m_allocatorProvider.createAllocatorIfNeeded(); + } + void deallocate(Node* node) const { m_allocatorProvider.deallocate(node); } + + iterator makeIterator(Node* position) { return iterator(this, position); } + const_iterator makeConstIterator(Node* position) const { + return const_iterator(this, position); + } + reverse_iterator makeReverseIterator(Node* position) { + return reverse_iterator(this, position); + } + const_reverse_iterator makeConstReverseIterator(Node* position) const { + return const_reverse_iterator(this, position); + } + + ImplType m_impl; + Node* m_head; + Node* m_tail; + typename Allocator::AllocatorProvider m_allocatorProvider; +}; + +// ListHashSetNode has this base class to hold the members because the MSVC +// compiler otherwise gets into circular template dependencies when trying to do +// sizeof on a node. 
+template <typename ValueArg> +class ListHashSetNodeBase { + DISALLOW_NEW(); + + protected: + template <typename U> + explicit ListHashSetNodeBase(U&& value) : m_value(std::forward<U>(value)) {} + + public: + ValueArg m_value; + ListHashSetNodeBase* m_prev = nullptr; + ListHashSetNodeBase* m_next = nullptr; +#if DCHECK_IS_ON() + bool m_isAllocated = true; +#endif +}; + +// This allocator is only used for non-Heap ListHashSets. +template <typename ValueArg, size_t inlineCapacity> +struct ListHashSetAllocator : public PartitionAllocator { + typedef PartitionAllocator TableAllocator; + typedef ListHashSetNode<ValueArg, ListHashSetAllocator> Node; + typedef ListHashSetNodeBase<ValueArg> NodeBase; + + class AllocatorProvider { + DISALLOW_NEW(); + + public: + AllocatorProvider() : m_allocator(nullptr) {} + void createAllocatorIfNeeded() { + if (!m_allocator) + m_allocator = new ListHashSetAllocator; + } + + void releaseAllocator() { + delete m_allocator; + m_allocator = nullptr; + } + + void swap(AllocatorProvider& other) { + std::swap(m_allocator, other.m_allocator); + } + + void deallocate(Node* node) const { + DCHECK(m_allocator); + m_allocator->deallocate(node); + } + + ListHashSetAllocator* get() const { + DCHECK(m_allocator); + return m_allocator; + } + + private: + // Not using std::unique_ptr as this pointer should be deleted at + // releaseAllocator() method rather than at destructor. 
+ ListHashSetAllocator* m_allocator; + }; + + ListHashSetAllocator() + : m_freeList(pool()), m_isDoneWithInitialFreeList(false) { + memset(m_pool.buffer, 0, sizeof(m_pool.buffer)); + } + + Node* allocateNode() { + Node* result = m_freeList; + + if (!result) + return static_cast<Node*>(WTF::Partitions::fastMalloc( + sizeof(NodeBase), WTF_HEAP_PROFILER_TYPE_NAME(Node))); + +#if DCHECK_IS_ON() + DCHECK(!result->m_isAllocated); +#endif + + Node* next = result->next(); +#if DCHECK_IS_ON() + DCHECK(!next || !next->m_isAllocated); +#endif + if (!next && !m_isDoneWithInitialFreeList) { + next = result + 1; + if (next == pastPool()) { + m_isDoneWithInitialFreeList = true; + next = nullptr; + } else { + DCHECK(inPool(next)); +#if DCHECK_IS_ON() + DCHECK(!next->m_isAllocated); +#endif + } + } + m_freeList = next; + + return result; + } + + void deallocate(Node* node) { + if (inPool(node)) { +#if DCHECK_IS_ON() + node->m_isAllocated = false; +#endif + node->m_next = m_freeList; + m_freeList = node; + return; + } + + WTF::Partitions::fastFree(node); + } + + bool inPool(Node* node) { return node >= pool() && node < pastPool(); } + + static void traceValue(typename PartitionAllocator::Visitor* visitor, + Node* node) {} + + private: + Node* pool() { return reinterpret_cast_ptr<Node*>(m_pool.buffer); } + Node* pastPool() { return pool() + m_poolSize; } + + Node* m_freeList; + bool m_isDoneWithInitialFreeList; +#if defined(MEMORY_SANITIZER_INITIAL_SIZE) + // The allocation pool for nodes is one big chunk that ASAN has no insight + // into, so it can cloak errors. Make it as small as possible to force nodes + // to be allocated individually where ASAN can see them. 
+ static const size_t m_poolSize = 1; +#else + static const size_t m_poolSize = inlineCapacity; +#endif + AlignedBuffer<sizeof(NodeBase) * m_poolSize, WTF_ALIGN_OF(NodeBase)> m_pool; +}; + +template <typename ValueArg, typename AllocatorArg> +class ListHashSetNode : public ListHashSetNodeBase<ValueArg> { + public: + typedef AllocatorArg NodeAllocator; + typedef ValueArg Value; + + template <typename U> + ListHashSetNode(U&& value) + : ListHashSetNodeBase<ValueArg>(std::forward<U>(value)) {} + + void* operator new(size_t, NodeAllocator* allocator) { + static_assert( + sizeof(ListHashSetNode) == sizeof(ListHashSetNodeBase<ValueArg>), + "please add any fields to the base"); + return allocator->allocateNode(); + } + + void setWasAlreadyDestructed() { + if (NodeAllocator::isGarbageCollected && + !IsTriviallyDestructible<ValueArg>::value) + this->m_prev = unlinkedNodePointer(); + } + + bool wasAlreadyDestructed() const { + DCHECK(NodeAllocator::isGarbageCollected); + return this->m_prev == unlinkedNodePointer(); + } + + static void finalize(void* pointer) { + // No need to waste time calling finalize if it's not needed. + DCHECK(!IsTriviallyDestructible<ValueArg>::value); + ListHashSetNode* self = reinterpret_cast_ptr<ListHashSetNode*>(pointer); + + // Check whether this node was already destructed before being unlinked + // from the collection. + if (self->wasAlreadyDestructed()) + return; + + self->m_value.~ValueArg(); + } + void finalizeGarbageCollectedObject() { finalize(this); } + + void destroy(NodeAllocator* allocator) { + this->~ListHashSetNode(); + setWasAlreadyDestructed(); + allocator->deallocate(this); + } + + // This is not called in normal tracing, but it is called if we find a + // pointer to a node on the stack using conservative scanning. Since the + // original ListHashSet may no longer exist we make sure to mark the + // neighbours in the chain too. 
+ template <typename VisitorDispatcher> + void trace(VisitorDispatcher visitor) { + // The conservative stack scan can find nodes that have been removed + // from the set and destructed. We don't need to trace these, and it + // would be wrong to do so, because the class will not expect the trace + // method to be called after the destructor. It's an error to remove a + // node from the ListHashSet while an iterator is positioned at that + // node, so there should be no valid pointers from the stack to a + // destructed node. + if (wasAlreadyDestructed()) + return; + NodeAllocator::traceValue(visitor, this); + visitor->mark(next()); + visitor->mark(prev()); + } + + ListHashSetNode* next() const { + return reinterpret_cast<ListHashSetNode*>(this->m_next); + } + ListHashSetNode* prev() const { + return reinterpret_cast<ListHashSetNode*>(this->m_prev); + } + + // Don't add fields here, the ListHashSetNodeBase and this should have the + // same size. + + static ListHashSetNode* unlinkedNodePointer() { + return reinterpret_cast<ListHashSetNode*>(-1); + } + + template <typename HashArg> + friend struct ListHashSetNodeHashFunctions; +}; + +template <typename HashArg> +struct ListHashSetNodeHashFunctions { + STATIC_ONLY(ListHashSetNodeHashFunctions); + template <typename T> + static unsigned hash(const T& key) { + return HashArg::hash(key->m_value); + } + template <typename T> + static bool equal(const T& a, const T& b) { + return HashArg::equal(a->m_value, b->m_value); + } + static const bool safeToCompareToEmptyOrDeleted = false; +}; + +template <typename Set> +class ListHashSetIterator { + DISALLOW_NEW(); + + private: + typedef typename Set::const_iterator const_iterator; + typedef typename Set::Node Node; + typedef typename Set::ValueType ValueType; + typedef ValueType& ReferenceType; + typedef ValueType* PointerType; + + ListHashSetIterator(const Set* set, Node* position) + : m_iterator(set, position) {} + + public: + ListHashSetIterator() {} + + // default copy, 
assignment and destructor are OK + + PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + ListHashSetIterator& operator++() { + ++m_iterator; + return *this; + } + ListHashSetIterator& operator--() { + --m_iterator; + return *this; + } + + // Postfix ++ and -- intentionally omitted. + + // Comparison. + bool operator==(const ListHashSetIterator& other) const { + return m_iterator == other.m_iterator; + } + bool operator!=(const ListHashSetIterator& other) const { + return m_iterator != other.m_iterator; + } + + operator const_iterator() const { return m_iterator; } + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher visitor) { + m_iterator.trace(visitor); + } + + private: + Node* getNode() { return m_iterator.getNode(); } + + const_iterator m_iterator; + + template <typename T, size_t inlineCapacity, typename U, typename V> + friend class ListHashSet; +}; + +template <typename Set> +class ListHashSetConstIterator { + DISALLOW_NEW(); + + private: + typedef typename Set::const_iterator const_iterator; + typedef typename Set::Node Node; + typedef typename Set::ValueType ValueType; + typedef const ValueType& ReferenceType; + typedef const ValueType* PointerType; + + friend class ListHashSetIterator<Set>; + + ListHashSetConstIterator(const Set* set, Node* position) + : m_set(set), m_position(position) {} + + public: + ListHashSetConstIterator() {} + + PointerType get() const { return &m_position->m_value; } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + ListHashSetConstIterator& operator++() { + DCHECK(m_position); + m_position = m_position->next(); + return *this; + } + + ListHashSetConstIterator& operator--() { + DCHECK_NE(m_position, m_set->m_head); + if (!m_position) + m_position = m_set->m_tail; + else + m_position = m_position->prev(); + return *this; + 
} + + // Postfix ++ and -- intentionally omitted. + + // Comparison. + bool operator==(const ListHashSetConstIterator& other) const { + return m_position == other.m_position; + } + bool operator!=(const ListHashSetConstIterator& other) const { + return m_position != other.m_position; + } + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher visitor) { + visitor->trace(*m_set); + visitor->trace(m_position); + } + + private: + Node* getNode() { return m_position; } + + const Set* m_set; + Node* m_position; + + template <typename T, size_t inlineCapacity, typename U, typename V> + friend class ListHashSet; +}; + +template <typename Set> +class ListHashSetReverseIterator { + DISALLOW_NEW(); + + private: + typedef typename Set::const_reverse_iterator const_reverse_iterator; + typedef typename Set::Node Node; + typedef typename Set::ValueType ValueType; + typedef ValueType& ReferenceType; + typedef ValueType* PointerType; + + ListHashSetReverseIterator(const Set* set, Node* position) + : m_iterator(set, position) {} + + public: + ListHashSetReverseIterator() {} + + // default copy, assignment and destructor are OK + + PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + ListHashSetReverseIterator& operator++() { + ++m_iterator; + return *this; + } + ListHashSetReverseIterator& operator--() { + --m_iterator; + return *this; + } + + // Postfix ++ and -- intentionally omitted. + + // Comparison. 
+ bool operator==(const ListHashSetReverseIterator& other) const { + return m_iterator == other.m_iterator; + } + bool operator!=(const ListHashSetReverseIterator& other) const { + return m_iterator != other.m_iterator; + } + + operator const_reverse_iterator() const { return m_iterator; } + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher visitor) { + m_iterator.trace(visitor); + } + + private: + Node* getNode() { return m_iterator.node(); } + + const_reverse_iterator m_iterator; + + template <typename T, size_t inlineCapacity, typename U, typename V> + friend class ListHashSet; +}; + +template <typename Set> +class ListHashSetConstReverseIterator { + DISALLOW_NEW(); + + private: + typedef typename Set::reverse_iterator reverse_iterator; + typedef typename Set::Node Node; + typedef typename Set::ValueType ValueType; + typedef const ValueType& ReferenceType; + typedef const ValueType* PointerType; + + friend class ListHashSetReverseIterator<Set>; + + ListHashSetConstReverseIterator(const Set* set, Node* position) + : m_set(set), m_position(position) {} + + public: + ListHashSetConstReverseIterator() {} + + PointerType get() const { return &m_position->m_value; } + ReferenceType operator*() const { return *get(); } + PointerType operator->() const { return get(); } + + ListHashSetConstReverseIterator& operator++() { + DCHECK(m_position); + m_position = m_position->prev(); + return *this; + } + + ListHashSetConstReverseIterator& operator--() { + DCHECK_NE(m_position, m_set->m_tail); + if (!m_position) + m_position = m_set->m_head; + else + m_position = m_position->next(); + return *this; + } + + // Postfix ++ and -- intentionally omitted. + + // Comparison. 
+ bool operator==(const ListHashSetConstReverseIterator& other) const { + return m_position == other.m_position; + } + bool operator!=(const ListHashSetConstReverseIterator& other) const { + return m_position != other.m_position; + } + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher visitor) { + visitor->trace(*m_set); + visitor->trace(m_position); + } + + private: + Node* getNode() { return m_position; } + + const Set* m_set; + Node* m_position; + + template <typename T, size_t inlineCapacity, typename U, typename V> + friend class ListHashSet; +}; + +template <typename HashFunctions> +struct ListHashSetTranslator { + STATIC_ONLY(ListHashSetTranslator); + template <typename T> + static unsigned hash(const T& key) { + return HashFunctions::hash(key); + } + template <typename T, typename U> + static bool equal(const T& a, const U& b) { + return HashFunctions::equal(a->m_value, b); + } + template <typename T, typename U, typename V> + static void translate(T*& location, U&& key, const V& allocator) { + location = new (const_cast<V*>(&allocator)) T(std::forward<U>(key)); + } +}; + +template <typename T, size_t inlineCapacity, typename U, typename Allocator> +inline ListHashSet<T, inlineCapacity, U, Allocator>::ListHashSet() + : m_head(nullptr), m_tail(nullptr) { + static_assert( + Allocator::isGarbageCollected || + !IsPointerToGarbageCollectedType<T>::value, + "Cannot put raw pointers to garbage-collected classes into " + "an off-heap ListHashSet. 
Use HeapListHashSet<Member<T>> instead."); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline ListHashSet<T, inlineCapacity, U, V>::ListHashSet( + const ListHashSet& other) + : m_head(nullptr), m_tail(nullptr) { + const_iterator end = other.end(); + for (const_iterator it = other.begin(); it != end; ++it) + insert(*it); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline ListHashSet<T, inlineCapacity, U, V>::ListHashSet(ListHashSet&& other) + : m_head(nullptr), m_tail(nullptr) { + swap(other); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline ListHashSet<T, inlineCapacity, U, V>& +ListHashSet<T, inlineCapacity, U, V>::operator=(const ListHashSet& other) { + ListHashSet tmp(other); + swap(tmp); + return *this; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline ListHashSet<T, inlineCapacity, U, V>& +ListHashSet<T, inlineCapacity, U, V>::operator=(ListHashSet&& other) { + swap(other); + return *this; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline void ListHashSet<T, inlineCapacity, U, V>::swap(ListHashSet& other) { + m_impl.swap(other.m_impl); + std::swap(m_head, other.m_head); + std::swap(m_tail, other.m_tail); + m_allocatorProvider.swap(other.m_allocatorProvider); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline void ListHashSet<T, inlineCapacity, U, V>::finalize() { + static_assert(!Allocator::isGarbageCollected, + "heap allocated ListHashSet should never call finalize()"); + deleteAllNodes(); + m_allocatorProvider.releaseAllocator(); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline T& ListHashSet<T, inlineCapacity, U, V>::front() { + DCHECK(!isEmpty()); + return m_head->m_value; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline void ListHashSet<T, inlineCapacity, U, V>::removeFirst() { + 
DCHECK(!isEmpty()); + m_impl.remove(m_head); + unlinkAndDelete(m_head); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline const T& ListHashSet<T, inlineCapacity, U, V>::front() const { + DCHECK(!isEmpty()); + return m_head->m_value; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline T& ListHashSet<T, inlineCapacity, U, V>::back() { + DCHECK(!isEmpty()); + return m_tail->m_value; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline const T& ListHashSet<T, inlineCapacity, U, V>::back() const { + DCHECK(!isEmpty()); + return m_tail->m_value; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline void ListHashSet<T, inlineCapacity, U, V>::pop_back() { + DCHECK(!isEmpty()); + m_impl.remove(m_tail); + unlinkAndDelete(m_tail); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline typename ListHashSet<T, inlineCapacity, U, V>::iterator +ListHashSet<T, inlineCapacity, U, V>::find(ValuePeekInType value) { + ImplTypeIterator it = m_impl.template find<BaseTranslator>(value); + if (it == m_impl.end()) + return end(); + return makeIterator(*it); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline typename ListHashSet<T, inlineCapacity, U, V>::const_iterator +ListHashSet<T, inlineCapacity, U, V>::find(ValuePeekInType value) const { + ImplTypeConstIterator it = m_impl.template find<BaseTranslator>(value); + if (it == m_impl.end()) + return end(); + return makeConstIterator(*it); +} + +template <typename Translator> +struct ListHashSetTranslatorAdapter { + STATIC_ONLY(ListHashSetTranslatorAdapter); + template <typename T> + static unsigned hash(const T& key) { + return Translator::hash(key); + } + template <typename T, typename U> + static bool equal(const T& a, const U& b) { + return Translator::equal(a->m_value, b); + } +}; + +template <typename ValueType, size_t inlineCapacity, typename U, typename 
V> +template <typename HashTranslator, typename T> +inline typename ListHashSet<ValueType, inlineCapacity, U, V>::iterator +ListHashSet<ValueType, inlineCapacity, U, V>::find(const T& value) { + ImplTypeConstIterator it = + m_impl.template find<ListHashSetTranslatorAdapter<HashTranslator>>(value); + if (it == m_impl.end()) + return end(); + return makeIterator(*it); +} + +template <typename ValueType, size_t inlineCapacity, typename U, typename V> +template <typename HashTranslator, typename T> +inline typename ListHashSet<ValueType, inlineCapacity, U, V>::const_iterator +ListHashSet<ValueType, inlineCapacity, U, V>::find(const T& value) const { + ImplTypeConstIterator it = + m_impl.template find<ListHashSetTranslatorAdapter<HashTranslator>>(value); + if (it == m_impl.end()) + return end(); + return makeConstIterator(*it); +} + +template <typename ValueType, size_t inlineCapacity, typename U, typename V> +template <typename HashTranslator, typename T> +inline bool ListHashSet<ValueType, inlineCapacity, U, V>::contains( + const T& value) const { + return m_impl.template contains<ListHashSetTranslatorAdapter<HashTranslator>>( + value); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline bool ListHashSet<T, inlineCapacity, U, V>::contains( + ValuePeekInType value) const { + return m_impl.template contains<BaseTranslator>(value); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +template <typename IncomingValueType> +typename ListHashSet<T, inlineCapacity, U, V>::AddResult +ListHashSet<T, inlineCapacity, U, V>::insert(IncomingValueType&& value) { + createAllocatorIfNeeded(); + // The second argument is a const ref. This is useful for the HashTable + // because it lets it take lvalues by reference, but for our purposes it's + // inconvenient, since it constrains us to be const, whereas the allocator + // actually changes when it does allocations. 
+ auto result = m_impl.template add<BaseTranslator>( + std::forward<IncomingValueType>(value), *this->getAllocator()); + if (result.isNewEntry) + appendNode(*result.storedValue); + return AddResult(*result.storedValue, result.isNewEntry); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +template <typename IncomingValueType> +typename ListHashSet<T, inlineCapacity, U, V>::iterator +ListHashSet<T, inlineCapacity, U, V>::addReturnIterator( + IncomingValueType&& value) { + return makeIterator(insert(std::forward<IncomingValueType>(value)).m_node); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +template <typename IncomingValueType> +typename ListHashSet<T, inlineCapacity, U, V>::AddResult +ListHashSet<T, inlineCapacity, U, V>::appendOrMoveToLast( + IncomingValueType&& value) { + createAllocatorIfNeeded(); + auto result = m_impl.template add<BaseTranslator>( + std::forward<IncomingValueType>(value), *this->getAllocator()); + Node* node = *result.storedValue; + if (!result.isNewEntry) + unlink(node); + appendNode(node); + return AddResult(*result.storedValue, result.isNewEntry); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +template <typename IncomingValueType> +typename ListHashSet<T, inlineCapacity, U, V>::AddResult +ListHashSet<T, inlineCapacity, U, V>::prependOrMoveToFirst( + IncomingValueType&& value) { + createAllocatorIfNeeded(); + auto result = m_impl.template add<BaseTranslator>( + std::forward<IncomingValueType>(value), *this->getAllocator()); + Node* node = *result.storedValue; + if (!result.isNewEntry) + unlink(node); + prependNode(node); + return AddResult(*result.storedValue, result.isNewEntry); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +template <typename IncomingValueType> +typename ListHashSet<T, inlineCapacity, U, V>::AddResult +ListHashSet<T, inlineCapacity, U, V>::insertBefore( + iterator it, + IncomingValueType&& newValue) { + 
createAllocatorIfNeeded(); + auto result = m_impl.template add<BaseTranslator>( + std::forward<IncomingValueType>(newValue), *this->getAllocator()); + if (result.isNewEntry) + insertNodeBefore(it.getNode(), *result.storedValue); + return AddResult(*result.storedValue, result.isNewEntry); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +template <typename IncomingValueType> +typename ListHashSet<T, inlineCapacity, U, V>::AddResult +ListHashSet<T, inlineCapacity, U, V>::insertBefore( + ValuePeekInType beforeValue, + IncomingValueType&& newValue) { + createAllocatorIfNeeded(); + return insertBefore(find(beforeValue), + std::forward<IncomingValueType>(newValue)); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline void ListHashSet<T, inlineCapacity, U, V>::erase(iterator it) { + if (it == end()) + return; + m_impl.remove(it.getNode()); + unlinkAndDelete(it.getNode()); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +inline void ListHashSet<T, inlineCapacity, U, V>::clear() { + deleteAllNodes(); + m_impl.clear(); + m_head = nullptr; + m_tail = nullptr; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +auto ListHashSet<T, inlineCapacity, U, V>::take(iterator it) -> ValueType { + if (it == end()) + return ValueTraits::emptyValue(); + + m_impl.remove(it.getNode()); + ValueType result = std::move(it.getNode()->m_value); + unlinkAndDelete(it.getNode()); + + return result; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +auto ListHashSet<T, inlineCapacity, U, V>::take(ValuePeekInType value) + -> ValueType { + return take(find(value)); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +auto ListHashSet<T, inlineCapacity, U, V>::takeFirst() -> ValueType { + DCHECK(!isEmpty()); + m_impl.remove(m_head); + ValueType result = std::move(m_head->m_value); + unlinkAndDelete(m_head); + + return result; +} + +template 
<typename T, size_t inlineCapacity, typename U, typename Allocator> +void ListHashSet<T, inlineCapacity, U, Allocator>::unlink(Node* node) { + if (!node->m_prev) { + DCHECK_EQ(node, m_head); + m_head = node->next(); + } else { + DCHECK_NE(node, m_head); + node->m_prev->m_next = node->m_next; + } + + if (!node->m_next) { + DCHECK_EQ(node, m_tail); + m_tail = node->prev(); + } else { + DCHECK_NE(node, m_tail); + node->m_next->m_prev = node->m_prev; + } +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +void ListHashSet<T, inlineCapacity, U, V>::unlinkAndDelete(Node* node) { + unlink(node); + node->destroy(this->getAllocator()); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +void ListHashSet<T, inlineCapacity, U, V>::appendNode(Node* node) { + node->m_prev = m_tail; + node->m_next = nullptr; + + if (m_tail) { + DCHECK(m_head); + m_tail->m_next = node; + } else { + DCHECK(!m_head); + m_head = node; + } + + m_tail = node; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +void ListHashSet<T, inlineCapacity, U, V>::prependNode(Node* node) { + node->m_prev = nullptr; + node->m_next = m_head; + + if (m_head) + m_head->m_prev = node; + else + m_tail = node; + + m_head = node; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +void ListHashSet<T, inlineCapacity, U, V>::insertNodeBefore(Node* beforeNode, + Node* newNode) { + if (!beforeNode) + return appendNode(newNode); + + newNode->m_next = beforeNode; + newNode->m_prev = beforeNode->m_prev; + if (beforeNode->m_prev) + beforeNode->m_prev->m_next = newNode; + beforeNode->m_prev = newNode; + + if (!newNode->m_prev) + m_head = newNode; +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +void ListHashSet<T, inlineCapacity, U, V>::deleteAllNodes() { + if (!m_head) + return; + + for (Node *node = m_head, *next = m_head->next(); node; + node = next, next = node ? 
node->next() : 0) + node->destroy(this->getAllocator()); +} + +template <typename T, size_t inlineCapacity, typename U, typename V> +template <typename VisitorDispatcher> +void ListHashSet<T, inlineCapacity, U, V>::trace(VisitorDispatcher visitor) { + static_assert(HashTraits<T>::weakHandlingFlag == NoWeakHandlingInCollections, + "HeapListHashSet does not support weakness, consider using " + "HeapLinkedHashSet instead."); + // This marks all the nodes and their contents live that can be accessed + // through the HashTable. That includes m_head and m_tail so we do not have + // to explicitly trace them here. + m_impl.trace(visitor); +} + +} // namespace WTF + +using WTF::ListHashSet; + +#endif // WTF_ListHashSet_h
diff --git a/third_party/WebKit/Source/platform/wtf/PrintStream.h b/third_party/WebKit/Source/platform/wtf/PrintStream.h new file mode 100644 index 0000000..bed90af --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/PrintStream.h
@@ -0,0 +1,134 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef PrintStream_h +#define PrintStream_h + +#include "platform/wtf/Allocator.h" +#include "platform/wtf/Compiler.h" +#include "platform/wtf/Noncopyable.h" +#include "platform/wtf/StdLibExtras.h" +#include "platform/wtf/WTFExport.h" +#include <stdarg.h> + +namespace WTF { + +class CString; +class String; + +class WTF_EXPORT PrintStream { + USING_FAST_MALLOC(PrintStream); + WTF_MAKE_NONCOPYABLE(PrintStream); + + public: + PrintStream(); + virtual ~PrintStream(); + + PRINTF_FORMAT(2, 3) void printf(const char* format, ...); + PRINTF_FORMAT(2, 0) virtual void vprintf(const char* format, va_list) = 0; + + // Typically a no-op for many subclasses of PrintStream, this is a hint that + // the implementation should flush its buffers if it had not done so already. + virtual void flush(); + + template <typename T> + void print(const T& value) { + printInternal(*this, value); + } + + template <typename T1, typename... RemainingTypes> + void print(const T1& value1, const RemainingTypes&... 
values) { + print(value1); + print(values...); + } +}; + +WTF_EXPORT void printInternal(PrintStream&, const char*); +WTF_EXPORT void printInternal(PrintStream&, const CString&); +WTF_EXPORT void printInternal(PrintStream&, const String&); +inline void printInternal(PrintStream& out, char* value) { + printInternal(out, static_cast<const char*>(value)); +} +inline void printInternal(PrintStream& out, CString& value) { + printInternal(out, static_cast<const CString&>(value)); +} +inline void printInternal(PrintStream& out, String& value) { + printInternal(out, static_cast<const String&>(value)); +} +WTF_EXPORT void printInternal(PrintStream&, bool); +WTF_EXPORT void printInternal(PrintStream&, int); +WTF_EXPORT void printInternal(PrintStream&, unsigned); +WTF_EXPORT void printInternal(PrintStream&, long); +WTF_EXPORT void printInternal(PrintStream&, unsigned long); +WTF_EXPORT void printInternal(PrintStream&, long long); +WTF_EXPORT void printInternal(PrintStream&, unsigned long long); +WTF_EXPORT void printInternal(PrintStream&, float); +WTF_EXPORT void printInternal(PrintStream&, double); + +template <typename T> +void printInternal(PrintStream& out, const T& value) { + value.dump(out); +} + +#define MAKE_PRINT_ADAPTOR(Name, Type, function) \ + class Name final { \ + STACK_ALLOCATED(); \ + \ + public: \ + Name(const Type& value) : m_value(value) {} \ + void dump(PrintStream& out) const { function(out, m_value); } \ + \ + private: \ + Type m_value; \ + } + +#define MAKE_PRINT_METHOD_ADAPTOR(Name, Type, method) \ + class Name final { \ + STACK_ALLOCATED(); \ + \ + public: \ + Name(const Type& value) : m_value(value) {} \ + void dump(PrintStream& out) const { m_value.method(out); } \ + \ + private: \ + const Type& m_value; \ + } + +#define MAKE_PRINT_METHOD(Type, dumpMethod, method) \ + MAKE_PRINT_METHOD_ADAPTOR(DumperFor_##method, Type, dumpMethod); \ + DumperFor_##method method() const { return DumperFor_##method(*this); } + +// Use an adaptor-based dumper for 
characters to avoid situations where +// you've "compressed" an integer to a character and it ends up printing +// as ASCII when you wanted it to print as a number. +void dumpCharacter(PrintStream&, char); +MAKE_PRINT_ADAPTOR(CharacterDump, char, dumpCharacter); + +} // namespace WTF + +using WTF::CharacterDump; +using WTF::PrintStream; + +#endif // PrintStream_h
diff --git a/third_party/WebKit/Source/platform/wtf/Vector.h b/third_party/WebKit/Source/platform/wtf/Vector.h new file mode 100644 index 0000000..88e343d --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/Vector.h
@@ -0,0 +1,1910 @@ +/* + * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_Vector_h +#define WTF_Vector_h + +#include "platform/wtf/Alignment.h" +#include "platform/wtf/ConditionalDestructor.h" +#include "platform/wtf/ContainerAnnotations.h" +#include "platform/wtf/Noncopyable.h" +#include "platform/wtf/NotFound.h" +#include "platform/wtf/StdLibExtras.h" +#include "platform/wtf/VectorTraits.h" +#include "platform/wtf/allocator/PartitionAllocator.h" +#include <algorithm> +#include <initializer_list> +#include <iterator> +#include <string.h> +#include <utility> + +// For ASAN builds, disable inline buffers completely as they cause various +// issues. 
+#ifdef ANNOTATE_CONTIGUOUS_CONTAINER +#define INLINE_CAPACITY 0 +#else +#define INLINE_CAPACITY inlineCapacity +#endif + +namespace WTF { + +#if defined(MEMORY_SANITIZER_INITIAL_SIZE) +static const size_t kInitialVectorSize = 1; +#else +#ifndef WTF_VECTOR_INITIAL_SIZE +#define WTF_VECTOR_INITIAL_SIZE 4 +#endif +static const size_t kInitialVectorSize = WTF_VECTOR_INITIAL_SIZE; +#endif + +template <typename T, size_t inlineBuffer, typename Allocator> +class Deque; + +// +// Vector Traits +// + +// Bunch of traits for Vector are defined here, with which you can customize +// Vector's behavior. In most cases the default traits are appropriate, so you +// usually don't have to specialize those traits by yourself. +// +// The behavior of the implementation below can be controlled by VectorTraits. +// If you want to change the behavior of your type, take a look at VectorTraits +// (defined in VectorTraits.h), too. + +template <bool needsDestruction, typename T> +struct VectorDestructor; + +template <typename T> +struct VectorDestructor<false, T> { + STATIC_ONLY(VectorDestructor); + static void destruct(T*, T*) {} +}; + +template <typename T> +struct VectorDestructor<true, T> { + STATIC_ONLY(VectorDestructor); + static void destruct(T* begin, T* end) { + for (T* cur = begin; cur != end; ++cur) + cur->~T(); + } +}; + +template <bool unusedSlotsMustBeZeroed, typename T> +struct VectorUnusedSlotClearer; + +template <typename T> +struct VectorUnusedSlotClearer<false, T> { + STATIC_ONLY(VectorUnusedSlotClearer); + static void clear(T*, T*) {} +#if DCHECK_IS_ON() + static void checkCleared(const T*, const T*) {} +#endif +}; + +template <typename T> +struct VectorUnusedSlotClearer<true, T> { + STATIC_ONLY(VectorUnusedSlotClearer); + static void clear(T* begin, T* end) { + memset(reinterpret_cast<void*>(begin), 0, sizeof(T) * (end - begin)); + } + +#if DCHECK_IS_ON() + static void checkCleared(const T* begin, const T* end) { + const unsigned char* unusedArea = + 
reinterpret_cast<const unsigned char*>(begin); + const unsigned char* endAddress = + reinterpret_cast<const unsigned char*>(end); + DCHECK_GE(endAddress, unusedArea); + for (int i = 0; i < endAddress - unusedArea; ++i) + DCHECK(!unusedArea[i]); + } +#endif +}; + +template <bool canInitializeWithMemset, typename T> +struct VectorInitializer; + +template <typename T> +struct VectorInitializer<false, T> { + STATIC_ONLY(VectorInitializer); + static void initialize(T* begin, T* end) { + for (T* cur = begin; cur != end; ++cur) + new (NotNull, cur) T; + } +}; + +template <typename T> +struct VectorInitializer<true, T> { + STATIC_ONLY(VectorInitializer); + static void initialize(T* begin, T* end) { + memset(begin, 0, + reinterpret_cast<char*>(end) - reinterpret_cast<char*>(begin)); + } +}; + +template <bool canMoveWithMemcpy, typename T> +struct VectorMover; + +template <typename T> +struct VectorMover<false, T> { + STATIC_ONLY(VectorMover); + static void move(T* src, T* srcEnd, T* dst) { + while (src != srcEnd) { + new (NotNull, dst) T(std::move(*src)); + src->~T(); + ++dst; + ++src; + } + } + static void moveOverlapping(T* src, T* srcEnd, T* dst) { + if (src > dst) { + move(src, srcEnd, dst); + } else { + T* dstEnd = dst + (srcEnd - src); + while (src != srcEnd) { + --srcEnd; + --dstEnd; + new (NotNull, dstEnd) T(std::move(*srcEnd)); + srcEnd->~T(); + } + } + } + static void swap(T* src, T* srcEnd, T* dst) { + std::swap_ranges(src, srcEnd, dst); + } +}; + +template <typename T> +struct VectorMover<true, T> { + STATIC_ONLY(VectorMover); + static void move(const T* src, const T* srcEnd, T* dst) { + if (LIKELY(dst && src)) + memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - + reinterpret_cast<const char*>(src)); + } + static void moveOverlapping(const T* src, const T* srcEnd, T* dst) { + if (LIKELY(dst && src)) + memmove(dst, src, reinterpret_cast<const char*>(srcEnd) - + reinterpret_cast<const char*>(src)); + } + static void swap(T* src, T* srcEnd, T* dst) { + 
std::swap_ranges(reinterpret_cast<char*>(src), + reinterpret_cast<char*>(srcEnd), + reinterpret_cast<char*>(dst)); + } +}; + +template <bool canCopyWithMemcpy, typename T> +struct VectorCopier; + +template <typename T> +struct VectorCopier<false, T> { + STATIC_ONLY(VectorCopier); + template <typename U> + static void uninitializedCopy(const U* src, const U* srcEnd, T* dst) { + while (src != srcEnd) { + new (NotNull, dst) T(*src); + ++dst; + ++src; + } + } +}; + +template <typename T> +struct VectorCopier<true, T> { + STATIC_ONLY(VectorCopier); + static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) { + if (LIKELY(dst && src)) + memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - + reinterpret_cast<const char*>(src)); + } + template <typename U> + static void uninitializedCopy(const U* src, const U* srcEnd, T* dst) { + VectorCopier<false, T>::uninitializedCopy(src, srcEnd, dst); + } +}; + +template <bool canFillWithMemset, typename T> +struct VectorFiller; + +template <typename T> +struct VectorFiller<false, T> { + STATIC_ONLY(VectorFiller); + static void uninitializedFill(T* dst, T* dstEnd, const T& val) { + while (dst != dstEnd) { + new (NotNull, dst) T(val); + ++dst; + } + } +}; + +template <typename T> +struct VectorFiller<true, T> { + STATIC_ONLY(VectorFiller); + static void uninitializedFill(T* dst, T* dstEnd, const T& val) { + static_assert(sizeof(T) == sizeof(char), "size of type should be one"); +#if COMPILER(GCC) && defined(_FORTIFY_SOURCE) + if (!__builtin_constant_p(dstEnd - dst) || (!(dstEnd - dst))) + memset(dst, val, dstEnd - dst); +#else + memset(dst, val, dstEnd - dst); +#endif + } +}; + +template <bool canCompareWithMemcmp, typename T> +struct VectorComparer; + +template <typename T> +struct VectorComparer<false, T> { + STATIC_ONLY(VectorComparer); + static bool compare(const T* a, const T* b, size_t size) { + DCHECK(a); + DCHECK(b); + return std::equal(a, a + size, b); + } +}; + +template <typename T> +struct 
VectorComparer<true, T> { + STATIC_ONLY(VectorComparer); + static bool compare(const T* a, const T* b, size_t size) { + DCHECK(a); + DCHECK(b); + return memcmp(a, b, sizeof(T) * size) == 0; + } +}; + +template <typename T> +struct VectorElementComparer { + STATIC_ONLY(VectorElementComparer); + template <typename U> + static bool compareElement(const T& left, const U& right) { + return left == right; + } +}; + +template <typename T> +struct VectorElementComparer<std::unique_ptr<T>> { + STATIC_ONLY(VectorElementComparer); + template <typename U> + static bool compareElement(const std::unique_ptr<T>& left, const U& right) { + return left.get() == right; + } +}; + +// A collection of all the traits used by Vector. This is basically an +// implementation detail of Vector, and you probably don't want to change this. +// If you want to customize Vector's behavior, you should specialize +// VectorTraits instead (defined in VectorTraits.h). +template <typename T> +struct VectorTypeOperations { + STATIC_ONLY(VectorTypeOperations); + static void destruct(T* begin, T* end) { + VectorDestructor<VectorTraits<T>::needsDestruction, T>::destruct(begin, + end); + } + + static void initialize(T* begin, T* end) { + VectorInitializer<VectorTraits<T>::canInitializeWithMemset, T>::initialize( + begin, end); + } + + static void move(T* src, T* srcEnd, T* dst) { + VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::move(src, srcEnd, dst); + } + + static void moveOverlapping(T* src, T* srcEnd, T* dst) { + VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::moveOverlapping( + src, srcEnd, dst); + } + + static void swap(T* src, T* srcEnd, T* dst) { + VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::swap(src, srcEnd, dst); + } + + static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) { + VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy( + src, srcEnd, dst); + } + + static void uninitializedFill(T* dst, T* dstEnd, const T& val) { + 
VectorFiller<VectorTraits<T>::canFillWithMemset, T>::uninitializedFill( + dst, dstEnd, val); + } + + static bool compare(const T* a, const T* b, size_t size) { + return VectorComparer<VectorTraits<T>::canCompareWithMemcmp, T>::compare( + a, b, size); + } + + template <typename U> + static bool compareElement(const T& left, U&& right) { + return VectorElementComparer<T>::compareElement(left, + std::forward<U>(right)); + } +}; + +// +// VectorBuffer +// + +// VectorBuffer is an implementation detail of Vector and Deque. It manages +// Vector's underlying buffer, and does operations like allocation or +// expansion. +// +// Not meant for general consumption. + +template <typename T, bool hasInlineCapacity, typename Allocator> +class VectorBufferBase { + WTF_MAKE_NONCOPYABLE(VectorBufferBase); + DISALLOW_NEW(); + + public: + void allocateBuffer(size_t newCapacity) { + DCHECK(newCapacity); + DCHECK_LE(newCapacity, + Allocator::template maxElementCountInBackingStore<T>()); + size_t sizeToAllocate = allocationSize(newCapacity); + if (hasInlineCapacity) + m_buffer = + Allocator::template allocateInlineVectorBacking<T>(sizeToAllocate); + else + m_buffer = Allocator::template allocateVectorBacking<T>(sizeToAllocate); + m_capacity = sizeToAllocate / sizeof(T); + } + + void allocateExpandedBuffer(size_t newCapacity) { + DCHECK(newCapacity); + size_t sizeToAllocate = allocationSize(newCapacity); + if (hasInlineCapacity) + m_buffer = + Allocator::template allocateInlineVectorBacking<T>(sizeToAllocate); + else + m_buffer = + Allocator::template allocateExpandedVectorBacking<T>(sizeToAllocate); + m_capacity = sizeToAllocate / sizeof(T); + } + + size_t allocationSize(size_t capacity) const { + return Allocator::template quantizedSize<T>(capacity); + } + + T* buffer() { return m_buffer; } + const T* buffer() const { return m_buffer; } + size_t capacity() const { return m_capacity; } + + void clearUnusedSlots(T* from, T* to) { + // If the vector backing is garbage-collected and needs 
tracing or + // finalizing, we clear out the unused slots so that the visitor or the + // finalizer does not cause a problem when visiting the unused slots. + VectorUnusedSlotClearer< + Allocator::isGarbageCollected && + (VectorTraits<T>::needsDestruction || + IsTraceableInCollectionTrait<VectorTraits<T>>::value), + T>::clear(from, to); + } + + void checkUnusedSlots(const T* from, const T* to) { +#if DCHECK_IS_ON() && !defined(ANNOTATE_CONTIGUOUS_CONTAINER) + VectorUnusedSlotClearer< + Allocator::isGarbageCollected && + (VectorTraits<T>::needsDestruction || + IsTraceableInCollectionTrait<VectorTraits<T>>::value), + T>::checkCleared(from, to); +#endif + } + + // |end| is exclusive, a la STL. + struct OffsetRange final { + OffsetRange() : begin(0), end(0) {} + explicit OffsetRange(size_t begin, size_t end) : begin(begin), end(end) { + DCHECK_LE(begin, end); + } + bool empty() const { return begin == end; } + size_t begin; + size_t end; + }; + + protected: + VectorBufferBase() : m_buffer(nullptr), m_capacity(0) {} + + VectorBufferBase(T* buffer, size_t capacity) + : m_buffer(buffer), m_capacity(capacity) {} + + T* m_buffer; + unsigned m_capacity; + unsigned m_size; +}; + +template <typename T, + size_t inlineCapacity, + typename Allocator = PartitionAllocator> +class VectorBuffer; + +template <typename T, typename Allocator> +class VectorBuffer<T, 0, Allocator> + : protected VectorBufferBase<T, false, Allocator> { + private: + using Base = VectorBufferBase<T, false, Allocator>; + + public: + using OffsetRange = typename Base::OffsetRange; + + VectorBuffer() {} + + explicit VectorBuffer(size_t capacity) { + // Calling malloc(0) might take a lock and may actually do an allocation + // on some systems. 
+ if (capacity) + allocateBuffer(capacity); + } + + void destruct() { + deallocateBuffer(m_buffer); + m_buffer = nullptr; + } + + void deallocateBuffer(T* bufferToDeallocate) { + Allocator::freeVectorBacking(bufferToDeallocate); + } + + bool expandBuffer(size_t newCapacity) { + size_t sizeToAllocate = allocationSize(newCapacity); + if (Allocator::expandVectorBacking(m_buffer, sizeToAllocate)) { + m_capacity = sizeToAllocate / sizeof(T); + return true; + } + return false; + } + + inline bool shrinkBuffer(size_t newCapacity) { + DCHECK_LT(newCapacity, capacity()); + size_t sizeToAllocate = allocationSize(newCapacity); + if (Allocator::shrinkVectorBacking(m_buffer, allocationSize(capacity()), + sizeToAllocate)) { + m_capacity = sizeToAllocate / sizeof(T); + return true; + } + return false; + } + + void resetBufferPointer() { + m_buffer = nullptr; + m_capacity = 0; + } + + // See the other specialization for the meaning of |thisHole| and |otherHole|. + // They are irrelevant in this case. + void swapVectorBuffer(VectorBuffer<T, 0, Allocator>& other, + OffsetRange thisHole, + OffsetRange otherHole) { + static_assert(VectorTraits<T>::canSwapUsingCopyOrMove, + "Cannot swap HeapVectors of TraceWrapperMembers."); + + std::swap(m_buffer, other.m_buffer); + std::swap(m_capacity, other.m_capacity); + std::swap(m_size, other.m_size); + } + + using Base::allocateBuffer; + using Base::allocationSize; + + using Base::buffer; + using Base::capacity; + + using Base::clearUnusedSlots; + using Base::checkUnusedSlots; + + bool hasOutOfLineBuffer() const { + // When inlineCapacity is 0 we have an out of line buffer if we have a + // buffer. 
+ return buffer(); + } + + T** bufferSlot() { return &m_buffer; } + + protected: + using Base::m_size; + + private: + using Base::m_buffer; + using Base::m_capacity; +}; + +template <typename T, size_t inlineCapacity, typename Allocator> +class VectorBuffer : protected VectorBufferBase<T, true, Allocator> { + WTF_MAKE_NONCOPYABLE(VectorBuffer); + + private: + using Base = VectorBufferBase<T, true, Allocator>; + + public: + using OffsetRange = typename Base::OffsetRange; + + VectorBuffer() : Base(inlineBuffer(), inlineCapacity) {} + + explicit VectorBuffer(size_t capacity) + : Base(inlineBuffer(), inlineCapacity) { + if (capacity > inlineCapacity) + Base::allocateBuffer(capacity); + } + + void destruct() { + deallocateBuffer(m_buffer); + m_buffer = nullptr; + } + + NEVER_INLINE void reallyDeallocateBuffer(T* bufferToDeallocate) { + Allocator::freeInlineVectorBacking(bufferToDeallocate); + } + + void deallocateBuffer(T* bufferToDeallocate) { + if (UNLIKELY(bufferToDeallocate != inlineBuffer())) + reallyDeallocateBuffer(bufferToDeallocate); + } + + bool expandBuffer(size_t newCapacity) { + DCHECK_GT(newCapacity, inlineCapacity); + if (m_buffer == inlineBuffer()) + return false; + + size_t sizeToAllocate = allocationSize(newCapacity); + if (Allocator::expandInlineVectorBacking(m_buffer, sizeToAllocate)) { + m_capacity = sizeToAllocate / sizeof(T); + return true; + } + return false; + } + + inline bool shrinkBuffer(size_t newCapacity) { + DCHECK_LT(newCapacity, capacity()); + if (newCapacity <= inlineCapacity) { + // We need to switch to inlineBuffer. Vector::shrinkCapacity will + // handle it. 
+ return false; + } + DCHECK_NE(m_buffer, inlineBuffer()); + size_t newSize = allocationSize(newCapacity); + if (!Allocator::shrinkInlineVectorBacking( + m_buffer, allocationSize(capacity()), newSize)) + return false; + m_capacity = newSize / sizeof(T); + return true; + } + + void resetBufferPointer() { + m_buffer = inlineBuffer(); + m_capacity = inlineCapacity; + } + + void allocateBuffer(size_t newCapacity) { + // FIXME: This should DCHECK(!m_buffer) to catch misuse/leaks. + if (newCapacity > inlineCapacity) + Base::allocateBuffer(newCapacity); + else + resetBufferPointer(); + } + + void allocateExpandedBuffer(size_t newCapacity) { + if (newCapacity > inlineCapacity) + Base::allocateExpandedBuffer(newCapacity); + else + resetBufferPointer(); + } + + size_t allocationSize(size_t capacity) const { + if (capacity <= inlineCapacity) + return m_inlineBufferSize; + return Base::allocationSize(capacity); + } + + // Swap two vector buffers, both of which have the same non-zero inline + // capacity. + // + // If the data is in an out-of-line buffer, we can just pass the pointers + // across the two buffers. If the data is in an inline buffer, we need to + // either swap or move each element, depending on whether each slot is + // occupied or not. + // + // Further complication comes from the fact that VectorBuffer is also used as + // the backing store of a Deque. Deque allocates the objects like a ring + // buffer, so there may be a "hole" (unallocated region) in the middle of the + // buffer. This function assumes elements in a range [m_buffer, m_buffer + + // m_size) are all allocated except for elements within |thisHole|. The same + // applies for |other.m_buffer| and |otherHole|. 
+ void swapVectorBuffer(VectorBuffer<T, inlineCapacity, Allocator>& other, + OffsetRange thisHole, + OffsetRange otherHole) { + using TypeOperations = VectorTypeOperations<T>; + + static_assert(VectorTraits<T>::canSwapUsingCopyOrMove, + "Cannot swap HeapVectors of TraceWrapperMembers."); + + if (buffer() != inlineBuffer() && other.buffer() != other.inlineBuffer()) { + // The easiest case: both buffers are non-inline. We just need to swap the + // pointers. + std::swap(m_buffer, other.m_buffer); + std::swap(m_capacity, other.m_capacity); + std::swap(m_size, other.m_size); + return; + } + + Allocator::enterGCForbiddenScope(); + + // Otherwise, we at least need to move some elements from one inline buffer + // to another. + // + // Terminology: "source" is a place from which elements are copied, and + // "destination" is a place to which elements are copied. thisSource or + // otherSource can be empty (represented by nullptr) when this range or + // other range is in an out-of-line buffer. + // + // We first record which range needs to get moved and where elements in such + // a range will go. Elements in an inline buffer will go to the other + // buffer's inline buffer. Elements in an out-of-line buffer won't move, + // because we can just swap pointers of out-of-line buffers. + T* thisSourceBegin = nullptr; + size_t thisSourceSize = 0; + T* thisDestinationBegin = nullptr; + if (buffer() == inlineBuffer()) { + thisSourceBegin = buffer(); + thisSourceSize = m_size; + thisDestinationBegin = other.inlineBuffer(); + if (!thisHole.empty()) { // Sanity check. + DCHECK_LT(thisHole.begin, thisHole.end); + DCHECK_LE(thisHole.end, thisSourceSize); + } + } else { + // We don't need the hole information for an out-of-line buffer. 
+ thisHole.begin = thisHole.end = 0; + } + T* otherSourceBegin = nullptr; + size_t otherSourceSize = 0; + T* otherDestinationBegin = nullptr; + if (other.buffer() == other.inlineBuffer()) { + otherSourceBegin = other.buffer(); + otherSourceSize = other.m_size; + otherDestinationBegin = inlineBuffer(); + if (!otherHole.empty()) { + DCHECK_LT(otherHole.begin, otherHole.end); + DCHECK_LE(otherHole.end, otherSourceSize); + } + } else { + otherHole.begin = otherHole.end = 0; + } + + // Next, we mutate members and do other bookkeeping. We do pointer swapping + // (for out-of-line buffers) here if we can. From now on, don't assume + // buffer() or capacity() maintains their original values. + std::swap(m_capacity, other.m_capacity); + if (thisSourceBegin && + !otherSourceBegin) { // Our buffer is inline, theirs is not. + DCHECK_EQ(buffer(), inlineBuffer()); + DCHECK_NE(other.buffer(), other.inlineBuffer()); + ANNOTATE_DELETE_BUFFER(m_buffer, inlineCapacity, m_size); + m_buffer = other.buffer(); + other.m_buffer = other.inlineBuffer(); + std::swap(m_size, other.m_size); + ANNOTATE_NEW_BUFFER(other.m_buffer, inlineCapacity, other.m_size); + } else if (!thisSourceBegin && + otherSourceBegin) { // Their buffer is inline, ours is not. + DCHECK_NE(buffer(), inlineBuffer()); + DCHECK_EQ(other.buffer(), other.inlineBuffer()); + ANNOTATE_DELETE_BUFFER(other.m_buffer, inlineCapacity, other.m_size); + other.m_buffer = buffer(); + m_buffer = inlineBuffer(); + std::swap(m_size, other.m_size); + ANNOTATE_NEW_BUFFER(m_buffer, inlineCapacity, m_size); + } else { // Both buffers are inline. + DCHECK(thisSourceBegin); + DCHECK(otherSourceBegin); + DCHECK_EQ(buffer(), inlineBuffer()); + DCHECK_EQ(other.buffer(), other.inlineBuffer()); + ANNOTATE_CHANGE_SIZE(m_buffer, inlineCapacity, m_size, other.m_size); + ANNOTATE_CHANGE_SIZE(other.m_buffer, inlineCapacity, other.m_size, + m_size); + std::swap(m_size, other.m_size); + } + + // We are ready to move elements. 
We determine an action for each "section", + // which is a contiguous range such that all elements in the range are + // treated similarly. + size_t sectionBegin = 0; + while (sectionBegin < inlineCapacity) { + // To determine the end of this section, we list up all the boundaries + // where the "occupiedness" may change. + size_t sectionEnd = inlineCapacity; + if (thisSourceBegin && sectionBegin < thisSourceSize) + sectionEnd = std::min(sectionEnd, thisSourceSize); + if (!thisHole.empty() && sectionBegin < thisHole.begin) + sectionEnd = std::min(sectionEnd, thisHole.begin); + if (!thisHole.empty() && sectionBegin < thisHole.end) + sectionEnd = std::min(sectionEnd, thisHole.end); + if (otherSourceBegin && sectionBegin < otherSourceSize) + sectionEnd = std::min(sectionEnd, otherSourceSize); + if (!otherHole.empty() && sectionBegin < otherHole.begin) + sectionEnd = std::min(sectionEnd, otherHole.begin); + if (!otherHole.empty() && sectionBegin < otherHole.end) + sectionEnd = std::min(sectionEnd, otherHole.end); + + DCHECK_LT(sectionBegin, sectionEnd); + + // Is the |sectionBegin|-th element of |thisSource| occupied? + bool thisOccupied = false; + if (thisSourceBegin && sectionBegin < thisSourceSize) { + // Yes, it's occupied, unless the position is in a hole. + if (thisHole.empty() || sectionBegin < thisHole.begin || + sectionBegin >= thisHole.end) + thisOccupied = true; + } + bool otherOccupied = false; + if (otherSourceBegin && sectionBegin < otherSourceSize) { + if (otherHole.empty() || sectionBegin < otherHole.begin || + sectionBegin >= otherHole.end) + otherOccupied = true; + } + + if (thisOccupied && otherOccupied) { + // Both occupied; swap them. In this case, one's destination must be the + // other's source (i.e. both ranges are in inline buffers). 
+ DCHECK_EQ(thisDestinationBegin, otherSourceBegin); + DCHECK_EQ(otherDestinationBegin, thisSourceBegin); + TypeOperations::swap(thisSourceBegin + sectionBegin, + thisSourceBegin + sectionEnd, + otherSourceBegin + sectionBegin); + } else if (thisOccupied) { + // Move from ours to theirs. + TypeOperations::move(thisSourceBegin + sectionBegin, + thisSourceBegin + sectionEnd, + thisDestinationBegin + sectionBegin); + Base::clearUnusedSlots(thisSourceBegin + sectionBegin, + thisSourceBegin + sectionEnd); + } else if (otherOccupied) { + // Move from theirs to ours. + TypeOperations::move(otherSourceBegin + sectionBegin, + otherSourceBegin + sectionEnd, + otherDestinationBegin + sectionBegin); + Base::clearUnusedSlots(otherSourceBegin + sectionBegin, + otherSourceBegin + sectionEnd); + } else { + // Both empty; nothing to do. + } + + sectionBegin = sectionEnd; + } + + Allocator::leaveGCForbiddenScope(); + } + + using Base::buffer; + using Base::capacity; + + bool hasOutOfLineBuffer() const { + return buffer() && buffer() != inlineBuffer(); + } + + T** bufferSlot() { return &m_buffer; } + + protected: + using Base::m_size; + + private: + using Base::m_buffer; + using Base::m_capacity; + + static const size_t m_inlineBufferSize = inlineCapacity * sizeof(T); + T* inlineBuffer() { return reinterpret_cast_ptr<T*>(m_inlineBuffer.buffer); } + const T* inlineBuffer() const { + return reinterpret_cast_ptr<const T*>(m_inlineBuffer.buffer); + } + + AlignedBuffer<m_inlineBufferSize, WTF_ALIGN_OF(T)> m_inlineBuffer; + template <typename U, size_t inlineBuffer, typename V> + friend class Deque; +}; + +// +// Vector +// + +// Vector is a container that works just like std::vector. WTF's Vector has +// several extra functionalities: inline buffer, behavior customization via +// traits, and Oilpan support. Those are explained in the sections below. +// +// Vector is the most basic container, which stores its element in a contiguous +// buffer. 
The buffer is expanded automatically when necessary. The elements +// are automatically moved to the new buffer. This event is called a +// reallocation. A reallocation takes O(N)-time (N = number of elements), but +// its occurrences are rare, so its time cost should not be significant, +// compared to the time cost of other operations to the vector. +// +// Time complexity of key operations is as follows: +// +// * Indexed access -- O(1) +// * Insertion or removal of an element at the end -- amortized O(1) +// * Other insertion or removal -- O(N) +// * Swapping with another vector -- O(1) +// +// 1. Iterator invalidation semantics +// +// Vector provides STL-compatible iterators and reverse iterators. Iterators +// are _invalidated_ on certain occasions. Reading an invalidated iterator +// causes undefined behavior. +// +// Iterators are invalidated on the following situations: +// +// * When a reallocation happens on a vector, all the iterators for that +// vector will be invalidated. +// * Some member functions invalidate part of the existing iterators for +// the vector; see comments on the individual functions. +// * [Oilpan only] Heap compaction invalidates all the iterators for any +// HeapVectors. This means you can only store an iterator on stack, as +// a local variable. +// +// In this context, pointers or references to an element of a Vector are +// essentially equivalent to iterators, in that they also become invalid +// whenever corresponding iterators are invalidated. +// +// 2. Inline buffer +// +// Vectors may have an _inline buffer_. An inline buffer is a storage area +// that is contained in the vector itself, along with other metadata like +// m_size. It is used as a storage space when the vector's elements fit in +// that space. If the inline buffer becomes full and further space is +// necessary, an out-of-line buffer is allocated in the heap, and it will +// take over the role of the inline buffer. 
+// +// The existence of an inline buffer is indicated by non-zero |inlineCapacity| +// template argument. The value represents the number of elements that can be +// stored in the inline buffer. Zero |inlineCapacity| means the vector has no +// inline buffer. +// +// An inline buffer increases the size of the Vector instances, and, in trade +// for that, it gives you several performance benefits, as long as the number +// of elements do not exceed |inlineCapacity|: +// +// * No heap allocation will be made. +// * Memory locality will improve. +// +// Generally, having an inline buffer is useful for vectors that (1) are +// frequently accessed or modified, and (2) contain only a few elements at +// most. +// +// 3. Behavior customization +// +// You usually do not need to customize Vector's behavior, since the default +// behavior is appropriate for normal usage. The behavior is controlled by +// VectorTypeOperations traits template above. Read VectorTypeOperations +// and VectorTraits if you want to change the behavior for your types (i.e. +// if you really want faster vector operations). +// +// The default traits basically do the following: +// +// * Skip constructor call and fill zeros with memset for simple types; +// * Skip destructor call for simple types; +// * Copy or move by memcpy for simple types; and +// * Customize the comparisons for smart pointer types, so you can look +// up a std::unique_ptr<T> element with a raw pointer, for instance. +// +// 4. Oilpan +// +// If you want to store garbage collected objects in Vector, (1) use HeapVector +// (defined in HeapAllocator.h) instead of Vector, and (2) make sure your +// garbage-collected type is wrapped with Member, like: +// +// HeapVector<Member<Node>> nodes; +// +// Unlike normal garbage-collected objects, a HeapVector object itself is +// NOT a garbage-collected object, but its backing buffer is allocated in +// Oilpan heap, and it may still carry garbage-collected objects. 
+// +// Even though a HeapVector object is not garbage-collected, you still need +// to trace it, if you stored it in your class. Also, you can allocate it +// as a local variable. This is useful when you want to build a vector locally +// and put it in an on-heap vector with swap(). +// +// Also, heap compaction, which may happen at any time when Blink code is not +// running (i.e. Blink code does not appear in the call stack), may invalidate +// existing iterators for any HeapVectors. So, essentially, you should always +// allocate an iterator on stack (as a local variable), and you should not +// store iterators in another heap object. + +template <typename T, + size_t inlineCapacity = 0, + typename Allocator = PartitionAllocator> +class Vector + : private VectorBuffer<T, INLINE_CAPACITY, Allocator>, + // Heap-allocated vectors with no inlineCapacity never need a destructor. + public ConditionalDestructor<Vector<T, INLINE_CAPACITY, Allocator>, + (INLINE_CAPACITY == 0) && + Allocator::isGarbageCollected> { + USE_ALLOCATOR(Vector, Allocator); + using Base = VectorBuffer<T, INLINE_CAPACITY, Allocator>; + using TypeOperations = VectorTypeOperations<T>; + using OffsetRange = typename Base::OffsetRange; + + public: + using ValueType = T; + using value_type = T; + + using iterator = T*; + using const_iterator = const T*; + using reverse_iterator = std::reverse_iterator<iterator>; + using const_reverse_iterator = std::reverse_iterator<const_iterator>; + + // Create an empty vector. + inline Vector(); + // Create a vector containing the specified number of default-initialized + // elements. + inline explicit Vector(size_t); + // Create a vector containing the specified number of elements, each of which + // is copy initialized from the specified value. + inline Vector(size_t, const T&); + + // Copying. 
+ Vector(const Vector&); + template <size_t otherCapacity> + explicit Vector(const Vector<T, otherCapacity, Allocator>&); + + Vector& operator=(const Vector&); + template <size_t otherCapacity> + Vector& operator=(const Vector<T, otherCapacity, Allocator>&); + + // Moving. + Vector(Vector&&); + Vector& operator=(Vector&&); + + // Construct with an initializer list. You can do e.g. + // Vector<int> v({1, 2, 3}); + // or + // v = {4, 5, 6}; + Vector(std::initializer_list<T> elements); + Vector& operator=(std::initializer_list<T> elements); + + // Basic inquiry about the vector's state. + // + // capacity() is the maximum number of elements that the Vector can hold + // without a reallocation. It can be zero. + size_t size() const { return m_size; } + size_t capacity() const { return Base::capacity(); } + bool isEmpty() const { return !size(); } + + // at() and operator[]: Obtain the reference of the element that is located + // at the given index. The reference may be invalidated on a reallocation. + // + // at() can be used in cases like: + // pointerToVector->at(1); + // instead of: + // (*pointerToVector)[1]; + T& at(size_t i) { + RELEASE_ASSERT(i < size()); + return Base::buffer()[i]; + } + const T& at(size_t i) const { + RELEASE_ASSERT(i < size()); + return Base::buffer()[i]; + } + + T& operator[](size_t i) { return at(i); } + const T& operator[](size_t i) const { return at(i); } + + // Return a pointer to the front of the backing buffer. Those pointers get + // invalidated on a reallocation. + T* data() { return Base::buffer(); } + const T* data() const { return Base::buffer(); } + + // Iterators and reverse iterators. They are invalidated on a reallocation. 
+ iterator begin() { return data(); } + iterator end() { return begin() + m_size; } + const_iterator begin() const { return data(); } + const_iterator end() const { return begin() + m_size; } + + reverse_iterator rbegin() { return reverse_iterator(end()); } + reverse_iterator rend() { return reverse_iterator(begin()); } + const_reverse_iterator rbegin() const { + return const_reverse_iterator(end()); + } + const_reverse_iterator rend() const { + return const_reverse_iterator(begin()); + } + + // Quick access to the first and the last element. It is invalid to call + // these functions when the vector is empty. + T& front() { return at(0); } + const T& front() const { return at(0); } + T& back() { return at(size() - 1); } + const T& back() const { return at(size() - 1); } + + // Searching. + // + // Comparisons are done in terms of compareElement(), which is usually + // operator==(). find() and reverseFind() returns an index of the element + // that is found first. If no match is found, kNotFound will be returned. + template <typename U> + bool contains(const U&) const; + template <typename U> + size_t find(const U&) const; + template <typename U> + size_t reverseFind(const U&) const; + + // Resize the vector to the specified size. + // + // These three functions are essentially similar. They differ in that + // (1) shrink() has a DCHECK to make sure the specified size is not more than + // size(), and (2) grow() has a DCHECK to make sure the specified size is + // not less than size(). + // + // When a vector shrinks, the extra elements in the back will be destructed. + // All the iterators pointing to a to-be-destructed element will be + // invalidated. + // + // When a vector grows, new elements will be added in the back, and they + // will be default-initialized. A reallocation may happen in this case. + void shrink(size_t); + void grow(size_t); + void resize(size_t); + + // Increase the capacity of the vector to at least |newCapacity|. 
The + // elements in the vector are not affected. This function does not shrink + // the size of the backing buffer, even if |newCapacity| is small. This + // function may cause a reallocation. + void reserveCapacity(size_t newCapacity); + + // This is similar to reserveCapacity() but must be called immediately after + // the vector is default-constructed. + void reserveInitialCapacity(size_t initialCapacity); + + // Shrink the backing buffer so it can contain exactly |size()| elements. + // This function may cause a reallocation. + void shrinkToFit() { shrinkCapacity(size()); } + + // Shrink the backing buffer if at least 50% of the vector's capacity is + // unused. If it shrinks, the new buffer contains roughly 25% of unused + // space. This function may cause a reallocation. + void shrinkToReasonableCapacity() { + if (size() * 2 < capacity()) + shrinkCapacity(size() + size() / 4 + 1); + } + + // Remove all the elements. This function actually releases the backing + // buffer, thus any iterators will get invalidated (including begin()). + void clear() { shrinkCapacity(0); } + + // Insertion to the back. All of these functions except uncheckedAppend() may + // cause a reallocation. + // + // push_back(value) + // Insert a single element to the back. + // emplace_back(args...) + // Insert a single element constructed as T(args...) to the back. The + // element is constructed directly on the backing buffer with placement + // new. + // append(buffer, size) + // appendVector(vector) + // appendRange(begin, end) + // Insert multiple elements represented by (1) |buffer| and |size| + // (for append), (2) |vector| (for appendVector), or (3) a pair of + // iterators (for appendRange) to the back. The elements will be copied. + // uncheckedAppend(value) + // Insert a single element like push_back(), but this function assumes + // the vector has enough capacity such that it can store the new element + // without a reallocation. 
Using this function could improve the + // performance when you append many elements repeatedly. + template <typename U> + void push_back(U&&); + template <typename... Args> + T& emplace_back(Args&&...); + ALWAYS_INLINE T& emplace_back() { + grow(m_size + 1); + return back(); + } + template <typename U> + void append(const U*, size_t); + template <typename U, size_t otherCapacity, typename V> + void appendVector(const Vector<U, otherCapacity, V>&); + template <typename Iterator> + void appendRange(Iterator begin, Iterator end); + template <typename U> + void uncheckedAppend(U&&); + + // Insertion to an arbitrary position. All of these functions will take + // O(size())-time. All of the elements after |position| will be moved to + // the new locations. |position| must be no more than size(). All of these + // functions may cause a reallocation. In any case, all the iterators + // pointing to an element after |position| will be invalidated. + // + // insert(position, value) + // Insert a single element at |position|. + // insert(position, buffer, size) + // insert(position, vector) + // Insert multiple elements represented by either |buffer| and |size| + // or |vector| at |position|. The elements will be copied. + // + // TODO(yutak): Why not insertVector()? + template <typename U> + void insert(size_t position, U&&); + template <typename U> + void insert(size_t position, const U*, size_t); + template <typename U, size_t otherCapacity, typename OtherAllocator> + void insert(size_t position, const Vector<U, otherCapacity, OtherAllocator>&); + + // Insertion to the front. All of these functions will take O(size())-time. + // All of the elements in the vector will be moved to the new locations. + // All of these functions may cause a reallocation. In any case, all the + // iterators pointing to any element in the vector will be invalidated. + // + // push_front(value) + // Insert a single element to the front. 
+ // push_front(buffer, size) + // prependVector(vector) + // Insert multiple elements represented by either |buffer| and |size| or + // |vector| to the front. The elements will be copied. + template <typename U> + void push_front(U&&); + template <typename U> + void push_front(const U*, size_t); + template <typename U, size_t otherCapacity, typename OtherAllocator> + void prependVector(const Vector<U, otherCapacity, OtherAllocator>&); + + // Remove an element or elements at the specified position. These functions + // take O(size())-time. All of the elements after the removed ones will be + // moved to the new locations. All the iterators pointing to any element + // after |position| will be invalidated. + void remove(size_t position); + void remove(size_t position, size_t length); + + // Remove the last element. Unlike remove(), (1) this function is fast, and + // (2) only iterators pointing to the last element will be invalidated. Other + // references will remain valid. + void pop_back() { + DCHECK(!isEmpty()); + shrink(size() - 1); + } + + // Filling the vector with the same value. If the vector has shrinked or + // growed as a result of this call, those events may invalidate some + // iterators. See comments for shrink() and grow(). + // + // fill(value, size) will resize the Vector to |size|, and then copy-assign + // or copy-initialize all the elements. + // + // fill(value) is a synonym for fill(value, size()). + void fill(const T&, size_t); + void fill(const T& val) { fill(val, size()); } + + // Swap two vectors quickly. + void swap(Vector& other) { + Base::swapVectorBuffer(other, OffsetRange(), OffsetRange()); + } + + // Reverse the contents. + void reverse(); + + // Maximum element count supported; allocating a vector + // buffer with a larger count will fail. + static size_t maxCapacity() { + return Allocator::template maxElementCountInBackingStore<T>(); + } + + // Off-GC-heap vectors: Destructor should be called. 
+ // On-GC-heap vectors: Destructor should be called for inline buffers (if + // any) but destructor shouldn't be called for vector backing since it is + // managed by the traced GC heap. + void finalize() { + if (!INLINE_CAPACITY) { + if (LIKELY(!Base::buffer())) + return; + } + ANNOTATE_DELETE_BUFFER(begin(), capacity(), m_size); + if (LIKELY(m_size) && + !(Allocator::isGarbageCollected && this->hasOutOfLineBuffer())) { + TypeOperations::destruct(begin(), end()); + m_size = 0; // Partial protection against use-after-free. + } + + Base::destruct(); + } + + void finalizeGarbageCollectedObject() { finalize(); } + + template <typename VisitorDispatcher> + void trace(VisitorDispatcher); + + class GCForbiddenScope { + STACK_ALLOCATED(); + + public: + GCForbiddenScope() { Allocator::enterGCForbiddenScope(); } + ~GCForbiddenScope() { Allocator::leaveGCForbiddenScope(); } + }; + + protected: + using Base::checkUnusedSlots; + using Base::clearUnusedSlots; + + private: + void expandCapacity(size_t newMinCapacity); + T* expandCapacity(size_t newMinCapacity, T*); + T* expandCapacity(size_t newMinCapacity, const T* data) { + return expandCapacity(newMinCapacity, const_cast<T*>(data)); + } + + template <typename U> + U* expandCapacity(size_t newMinCapacity, U*); + void shrinkCapacity(size_t newCapacity); + template <typename U> + void appendSlowCase(U&&); + + using Base::m_size; + using Base::buffer; + using Base::swapVectorBuffer; + using Base::allocateBuffer; + using Base::allocationSize; +}; + +// +// Vector out-of-line implementation +// + +template <typename T, size_t inlineCapacity, typename Allocator> +inline Vector<T, inlineCapacity, Allocator>::Vector() { + static_assert(!std::is_polymorphic<T>::value || + !VectorTraits<T>::canInitializeWithMemset, + "Cannot initialize with memset if there is a vtable"); + static_assert(Allocator::isGarbageCollected || + !AllowsOnlyPlacementNew<T>::value || !IsTraceable<T>::value, + "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects 
that " + "have trace methods into an off-heap Vector"); + static_assert(Allocator::isGarbageCollected || + !IsPointerToGarbageCollectedType<T>::value, + "Cannot put raw pointers to garbage-collected classes into " + "an off-heap Vector. Use HeapVector<Member<T>> instead."); + + ANNOTATE_NEW_BUFFER(begin(), capacity(), 0); + m_size = 0; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +inline Vector<T, inlineCapacity, Allocator>::Vector(size_t size) : Base(size) { + static_assert(!std::is_polymorphic<T>::value || + !VectorTraits<T>::canInitializeWithMemset, + "Cannot initialize with memset if there is a vtable"); + static_assert(Allocator::isGarbageCollected || + !AllowsOnlyPlacementNew<T>::value || !IsTraceable<T>::value, + "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that " + "have trace methods into an off-heap Vector"); + static_assert(Allocator::isGarbageCollected || + !IsPointerToGarbageCollectedType<T>::value, + "Cannot put raw pointers to garbage-collected classes into " + "an off-heap Vector. Use HeapVector<Member<T>> instead."); + + ANNOTATE_NEW_BUFFER(begin(), capacity(), size); + m_size = size; + TypeOperations::initialize(begin(), end()); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +inline Vector<T, inlineCapacity, Allocator>::Vector(size_t size, const T& val) + : Base(size) { + // TODO(yutak): Introduce these assertions. Some use sites call this function + // in the context where T is an incomplete type. 
+ // + // static_assert(!std::is_polymorphic<T>::value || + // !VectorTraits<T>::canInitializeWithMemset, + // "Cannot initialize with memset if there is a vtable"); + // static_assert(Allocator::isGarbageCollected || + // !AllowsOnlyPlacementNew<T>::value || + // !IsTraceable<T>::value, + // "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that " + // "have trace methods into an off-heap Vector"); + // static_assert(Allocator::isGarbageCollected || + // !IsPointerToGarbageCollectedType<T>::value, + // "Cannot put raw pointers to garbage-collected classes into " + // "an off-heap Vector. Use HeapVector<Member<T>> instead."); + + ANNOTATE_NEW_BUFFER(begin(), capacity(), size); + m_size = size; + TypeOperations::uninitializedFill(begin(), end(), val); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +Vector<T, inlineCapacity, Allocator>::Vector(const Vector& other) + : Base(other.capacity()) { + ANNOTATE_NEW_BUFFER(begin(), capacity(), other.size()); + m_size = other.size(); + TypeOperations::uninitializedCopy(other.begin(), other.end(), begin()); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <size_t otherCapacity> +Vector<T, inlineCapacity, Allocator>::Vector( + const Vector<T, otherCapacity, Allocator>& other) + : Base(other.capacity()) { + ANNOTATE_NEW_BUFFER(begin(), capacity(), other.size()); + m_size = other.size(); + TypeOperations::uninitializedCopy(other.begin(), other.end(), begin()); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +Vector<T, inlineCapacity, Allocator>& Vector<T, inlineCapacity, Allocator>:: +operator=(const Vector<T, inlineCapacity, Allocator>& other) { + if (UNLIKELY(&other == this)) + return *this; + + if (size() > other.size()) { + shrink(other.size()); + } else if (other.size() > capacity()) { + clear(); + reserveCapacity(other.size()); + DCHECK(begin()); + } + + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, other.size()); + 
std::copy(other.begin(), other.begin() + size(), begin()); + TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end()); + m_size = other.size(); + + return *this; +} + +inline bool typelessPointersAreEqual(const void* a, const void* b) { + return a == b; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <size_t otherCapacity> +Vector<T, inlineCapacity, Allocator>& Vector<T, inlineCapacity, Allocator>:: +operator=(const Vector<T, otherCapacity, Allocator>& other) { + // If the inline capacities match, we should call the more specific + // template. If the inline capacities don't match, the two objects + // shouldn't be allocated the same address. + DCHECK(!typelessPointersAreEqual(&other, this)); + + if (size() > other.size()) { + shrink(other.size()); + } else if (other.size() > capacity()) { + clear(); + reserveCapacity(other.size()); + DCHECK(begin()); + } + + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, other.size()); + std::copy(other.begin(), other.begin() + size(), begin()); + TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end()); + m_size = other.size(); + + return *this; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +Vector<T, inlineCapacity, Allocator>::Vector( + Vector<T, inlineCapacity, Allocator>&& other) { + m_size = 0; + // It's a little weird to implement a move constructor using swap but this + // way we don't have to add a move constructor to VectorBuffer. 
+ swap(other); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +Vector<T, inlineCapacity, Allocator>& Vector<T, inlineCapacity, Allocator>:: +operator=(Vector<T, inlineCapacity, Allocator>&& other) { + swap(other); + return *this; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +Vector<T, inlineCapacity, Allocator>::Vector(std::initializer_list<T> elements) + : Base(elements.size()) { + ANNOTATE_NEW_BUFFER(begin(), capacity(), elements.size()); + m_size = elements.size(); + TypeOperations::uninitializedCopy(elements.begin(), elements.end(), begin()); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +Vector<T, inlineCapacity, Allocator>& Vector<T, inlineCapacity, Allocator>:: +operator=(std::initializer_list<T> elements) { + if (size() > elements.size()) { + shrink(elements.size()); + } else if (elements.size() > capacity()) { + clear(); + reserveCapacity(elements.size()); + DCHECK(begin()); + } + + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, elements.size()); + std::copy(elements.begin(), elements.begin() + m_size, begin()); + TypeOperations::uninitializedCopy(elements.begin() + m_size, elements.end(), + end()); + m_size = elements.size(); + + return *this; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +bool Vector<T, inlineCapacity, Allocator>::contains(const U& value) const { + return find(value) != kNotFound; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +size_t Vector<T, inlineCapacity, Allocator>::find(const U& value) const { + const T* b = begin(); + const T* e = end(); + for (const T* iter = b; iter < e; ++iter) { + if (TypeOperations::compareElement(*iter, value)) + return iter - b; + } + return kNotFound; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +size_t Vector<T, inlineCapacity, Allocator>::reverseFind(const U& value) const { + const T* 
b = begin(); + const T* iter = end(); + while (iter > b) { + --iter; + if (TypeOperations::compareElement(*iter, value)) + return iter - b; + } + return kNotFound; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +void Vector<T, inlineCapacity, Allocator>::fill(const T& val, size_t newSize) { + if (size() > newSize) { + shrink(newSize); + } else if (newSize > capacity()) { + clear(); + reserveCapacity(newSize); + DCHECK(begin()); + } + + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, newSize); + std::fill(begin(), end(), val); + TypeOperations::uninitializedFill(end(), begin() + newSize, val); + m_size = newSize; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +void Vector<T, inlineCapacity, Allocator>::expandCapacity( + size_t newMinCapacity) { + size_t oldCapacity = capacity(); + size_t expandedCapacity = oldCapacity; + // We use a more aggressive expansion strategy for Vectors with inline + // storage. This is because they are more likely to be on the stack, so the + // risk of heap bloat is minimized. Furthermore, exceeding the inline + // capacity limit is not supposed to happen in the common case and may + // indicate a pathological condition or microbenchmark. + if (INLINE_CAPACITY) { + expandedCapacity *= 2; + // Check for integer overflow, which could happen in the 32-bit build. + RELEASE_ASSERT(expandedCapacity > oldCapacity); + } else { + // This cannot integer overflow. + // On 64-bit, the "expanded" integer is 32-bit, and any encroachment + // above 2^32 will fail allocation in allocateBuffer(). On 32-bit, + // there's not enough address space to hold the old and new buffers. In + // addition, our underlying allocator is supposed to always fail on > + // (2^31 - 1) allocations. 
+ expandedCapacity += (expandedCapacity / 4) + 1; + } + reserveCapacity(std::max( + newMinCapacity, + std::max(static_cast<size_t>(kInitialVectorSize), expandedCapacity))); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +T* Vector<T, inlineCapacity, Allocator>::expandCapacity(size_t newMinCapacity, + T* ptr) { + if (ptr < begin() || ptr >= end()) { + expandCapacity(newMinCapacity); + return ptr; + } + size_t index = ptr - begin(); + expandCapacity(newMinCapacity); + return begin() + index; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +inline U* Vector<T, inlineCapacity, Allocator>::expandCapacity( + size_t newMinCapacity, + U* ptr) { + expandCapacity(newMinCapacity); + return ptr; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +inline void Vector<T, inlineCapacity, Allocator>::resize(size_t size) { + if (size <= m_size) { + TypeOperations::destruct(begin() + size, end()); + clearUnusedSlots(begin() + size, end()); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, size); + } else { + if (size > capacity()) + expandCapacity(size); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, size); + TypeOperations::initialize(end(), begin() + size); + } + + m_size = size; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +void Vector<T, inlineCapacity, Allocator>::shrink(size_t size) { + DCHECK_LE(size, m_size); + TypeOperations::destruct(begin() + size, end()); + clearUnusedSlots(begin() + size, end()); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, size); + m_size = size; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +void Vector<T, inlineCapacity, Allocator>::grow(size_t size) { + DCHECK_GE(size, m_size); + if (size > capacity()) + expandCapacity(size); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, size); + TypeOperations::initialize(end(), begin() + size); + m_size = size; +} + +template <typename T, size_t 
inlineCapacity, typename Allocator> +void Vector<T, inlineCapacity, Allocator>::reserveCapacity(size_t newCapacity) { + if (UNLIKELY(newCapacity <= capacity())) + return; + T* oldBuffer = begin(); + if (!oldBuffer) { + Base::allocateBuffer(newCapacity); + return; + } +#ifdef ANNOTATE_CONTIGUOUS_CONTAINER + size_t oldCapacity = capacity(); +#endif + // The Allocator::isGarbageCollected check is not needed. The check is just + // a static hint for a compiler to indicate that Base::expandBuffer returns + // false if Allocator is a PartitionAllocator. + if (Allocator::isGarbageCollected && Base::expandBuffer(newCapacity)) { + ANNOTATE_CHANGE_CAPACITY(begin(), oldCapacity, m_size, capacity()); + return; + } + T* oldEnd = end(); + Base::allocateExpandedBuffer(newCapacity); + ANNOTATE_NEW_BUFFER(begin(), capacity(), m_size); + TypeOperations::move(oldBuffer, oldEnd, begin()); + clearUnusedSlots(oldBuffer, oldEnd); + ANNOTATE_DELETE_BUFFER(oldBuffer, oldCapacity, m_size); + Base::deallocateBuffer(oldBuffer); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +inline void Vector<T, inlineCapacity, Allocator>::reserveInitialCapacity( + size_t initialCapacity) { + DCHECK(!m_size); + DCHECK(capacity() == INLINE_CAPACITY); + if (initialCapacity > INLINE_CAPACITY) { + ANNOTATE_DELETE_BUFFER(begin(), capacity(), m_size); + Base::allocateBuffer(initialCapacity); + ANNOTATE_NEW_BUFFER(begin(), capacity(), m_size); + } +} + +template <typename T, size_t inlineCapacity, typename Allocator> +void Vector<T, inlineCapacity, Allocator>::shrinkCapacity(size_t newCapacity) { + if (newCapacity >= capacity()) + return; + + if (newCapacity < size()) + shrink(newCapacity); + + T* oldBuffer = begin(); +#ifdef ANNOTATE_CONTIGUOUS_CONTAINER + size_t oldCapacity = capacity(); +#endif + if (newCapacity > 0) { + if (Base::shrinkBuffer(newCapacity)) { + ANNOTATE_CHANGE_CAPACITY(begin(), oldCapacity, m_size, capacity()); + return; + } + + T* oldEnd = end(); + 
Base::allocateBuffer(newCapacity); + if (begin() != oldBuffer) { + ANNOTATE_NEW_BUFFER(begin(), capacity(), m_size); + TypeOperations::move(oldBuffer, oldEnd, begin()); + clearUnusedSlots(oldBuffer, oldEnd); + ANNOTATE_DELETE_BUFFER(oldBuffer, oldCapacity, m_size); + } + } else { + Base::resetBufferPointer(); +#ifdef ANNOTATE_CONTIGUOUS_CONTAINER + if (oldBuffer != begin()) { + ANNOTATE_NEW_BUFFER(begin(), capacity(), m_size); + ANNOTATE_DELETE_BUFFER(oldBuffer, oldCapacity, m_size); + } +#endif + } + + Base::deallocateBuffer(oldBuffer); +} + +// Templatizing these is better than just letting the conversion happen +// implicitly, because for instance it allows a PassRefPtr to be appended to a +// RefPtr vector without refcount thrash. + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +ALWAYS_INLINE void Vector<T, inlineCapacity, Allocator>::push_back(U&& val) { + DCHECK(Allocator::isAllocationAllowed()); + if (LIKELY(size() != capacity())) { + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); + new (NotNull, end()) T(std::forward<U>(val)); + ++m_size; + return; + } + + appendSlowCase(std::forward<U>(val)); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename... Args> +ALWAYS_INLINE T& Vector<T, inlineCapacity, Allocator>::emplace_back( + Args&&... 
args) { + DCHECK(Allocator::isAllocationAllowed()); + if (UNLIKELY(size() == capacity())) + expandCapacity(size() + 1); + + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); + T* t = new (NotNull, end()) T(std::forward<Args>(args)...); + ++m_size; + return *t; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +void Vector<T, inlineCapacity, Allocator>::append(const U* data, + size_t dataSize) { + DCHECK(Allocator::isAllocationAllowed()); + size_t newSize = m_size + dataSize; + if (newSize > capacity()) { + data = expandCapacity(newSize, data); + DCHECK(begin()); + } + RELEASE_ASSERT(newSize >= m_size); + T* dest = end(); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, newSize); + VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy( + data, &data[dataSize], dest); + m_size = newSize; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +NEVER_INLINE void Vector<T, inlineCapacity, Allocator>::appendSlowCase( + U&& val) { + DCHECK_EQ(size(), capacity()); + + typename std::remove_reference<U>::type* ptr = &val; + ptr = expandCapacity(size() + 1, ptr); + DCHECK(begin()); + + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); + new (NotNull, end()) T(std::forward<U>(*ptr)); + ++m_size; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U, size_t otherCapacity, typename OtherAllocator> +inline void Vector<T, inlineCapacity, Allocator>::appendVector( + const Vector<U, otherCapacity, OtherAllocator>& val) { + append(val.begin(), val.size()); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename Iterator> +void Vector<T, inlineCapacity, Allocator>::appendRange(Iterator begin, + Iterator end) { + for (Iterator it = begin; it != end; ++it) + push_back(*it); +} + +// This version of append saves a branch in the case where you know that the +// vector's capacity is large 
enough for the append to succeed. +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +ALWAYS_INLINE void Vector<T, inlineCapacity, Allocator>::uncheckedAppend( + U&& val) { +#ifdef ANNOTATE_CONTIGUOUS_CONTAINER + // Vectors in ASAN builds don't have inlineCapacity. + push_back(std::forward<U>(val)); +#else + DCHECK_LT(size(), capacity()); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); + new (NotNull, end()) T(std::forward<U>(val)); + ++m_size; +#endif +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +inline void Vector<T, inlineCapacity, Allocator>::insert(size_t position, + U&& val) { + DCHECK(Allocator::isAllocationAllowed()); + RELEASE_ASSERT(position <= size()); + typename std::remove_reference<U>::type* data = &val; + if (size() == capacity()) { + data = expandCapacity(size() + 1, data); + DCHECK(begin()); + } + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); + T* spot = begin() + position; + TypeOperations::moveOverlapping(spot, end(), spot + 1); + new (NotNull, spot) T(std::forward<U>(*data)); + ++m_size; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +void Vector<T, inlineCapacity, Allocator>::insert(size_t position, + const U* data, + size_t dataSize) { + DCHECK(Allocator::isAllocationAllowed()); + RELEASE_ASSERT(position <= size()); + size_t newSize = m_size + dataSize; + if (newSize > capacity()) { + data = expandCapacity(newSize, data); + DCHECK(begin()); + } + RELEASE_ASSERT(newSize >= m_size); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, newSize); + T* spot = begin() + position; + TypeOperations::moveOverlapping(spot, end(), spot + dataSize); + VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy( + data, &data[dataSize], spot); + m_size = newSize; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U, size_t otherCapacity, 
typename OtherAllocator> +inline void Vector<T, inlineCapacity, Allocator>::insert( + size_t position, + const Vector<U, otherCapacity, OtherAllocator>& val) { + insert(position, val.begin(), val.size()); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +inline void Vector<T, inlineCapacity, Allocator>::push_front(U&& val) { + insert(0, std::forward<U>(val)); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U> +void Vector<T, inlineCapacity, Allocator>::push_front(const U* data, + size_t dataSize) { + insert(0, data, dataSize); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename U, size_t otherCapacity, typename OtherAllocator> +inline void Vector<T, inlineCapacity, Allocator>::prependVector( + const Vector<U, otherCapacity, OtherAllocator>& val) { + insert(0, val.begin(), val.size()); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +inline void Vector<T, inlineCapacity, Allocator>::remove(size_t position) { + RELEASE_ASSERT(position < size()); + T* spot = begin() + position; + spot->~T(); + TypeOperations::moveOverlapping(spot + 1, end(), spot); + clearUnusedSlots(end() - 1, end()); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size - 1); + --m_size; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +inline void Vector<T, inlineCapacity, Allocator>::remove(size_t position, + size_t length) { + SECURITY_DCHECK(position <= size()); + if (!length) + return; + RELEASE_ASSERT(position + length <= size()); + T* beginSpot = begin() + position; + T* endSpot = beginSpot + length; + TypeOperations::destruct(beginSpot, endSpot); + TypeOperations::moveOverlapping(endSpot, end(), beginSpot); + clearUnusedSlots(end() - length, end()); + ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size - length); + m_size -= length; +} + +template <typename T, size_t inlineCapacity, typename Allocator> +inline 
void Vector<T, inlineCapacity, Allocator>::reverse() { + for (size_t i = 0; i < m_size / 2; ++i) + std::swap(at(i), at(m_size - 1 - i)); +} + +template <typename T, size_t inlineCapacity, typename Allocator> +inline void swap(Vector<T, inlineCapacity, Allocator>& a, + Vector<T, inlineCapacity, Allocator>& b) { + a.swap(b); +} + +template <typename T, + size_t inlineCapacityA, + size_t inlineCapacityB, + typename Allocator> +bool operator==(const Vector<T, inlineCapacityA, Allocator>& a, + const Vector<T, inlineCapacityB, Allocator>& b) { + if (a.size() != b.size()) + return false; + if (a.isEmpty()) + return true; + return VectorTypeOperations<T>::compare(a.data(), b.data(), a.size()); +} + +template <typename T, + size_t inlineCapacityA, + size_t inlineCapacityB, + typename Allocator> +inline bool operator!=(const Vector<T, inlineCapacityA, Allocator>& a, + const Vector<T, inlineCapacityB, Allocator>& b) { + return !(a == b); +} + +// This is only called if the allocator is a HeapAllocator. It is used when +// visiting during a tracing GC. +template <typename T, size_t inlineCapacity, typename Allocator> +template <typename VisitorDispatcher> +void Vector<T, inlineCapacity, Allocator>::trace(VisitorDispatcher visitor) { + DCHECK(Allocator::isGarbageCollected) << "Garbage collector must be enabled."; + if (!buffer()) + return; + if (this->hasOutOfLineBuffer()) { + // This is a performance optimization for a case where the buffer has + // been already traced by somewhere. This can happen if the conservative + // scanning traced an on-stack (false-positive or real) pointer to the + // HeapVector, and then visitor->trace() traces the HeapVector. 
+ if (Allocator::isHeapObjectAlive(buffer())) + return; + Allocator::markNoTracing(visitor, buffer()); + Allocator::registerBackingStoreReference(visitor, Base::bufferSlot()); + } + const T* bufferBegin = buffer(); + const T* bufferEnd = buffer() + size(); + if (IsTraceableInCollectionTrait<VectorTraits<T>>::value) { + for (const T* bufferEntry = bufferBegin; bufferEntry != bufferEnd; + bufferEntry++) + Allocator::template trace<VisitorDispatcher, T, VectorTraits<T>>( + visitor, *const_cast<T*>(bufferEntry)); + checkUnusedSlots(buffer() + size(), buffer() + capacity()); + } +} + +} // namespace WTF + +using WTF::Vector; + +#endif // WTF_Vector_h
diff --git a/third_party/WebKit/Source/platform/wtf/VectorTraits.h b/third_party/WebKit/Source/platform/wtf/VectorTraits.h new file mode 100644 index 0000000..5f20a75 --- /dev/null +++ b/third_party/WebKit/Source/platform/wtf/VectorTraits.h
@@ -0,0 +1,186 @@ +/* + * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public License + * along with this library; see the file COPYING.LIB. If not, write to + * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + * Boston, MA 02110-1301, USA. + * + */ + +#ifndef WTF_VectorTraits_h +#define WTF_VectorTraits_h + +#include "platform/wtf/RefPtr.h" +#include "platform/wtf/TypeTraits.h" +#include <memory> +#include <type_traits> +#include <utility> + +namespace WTF { + +template <typename T> +struct VectorTraitsBase { + static const bool needsDestruction = !IsTriviallyDestructible<T>::value; + + static const bool canInitializeWithMemset = + IsTriviallyDefaultConstructible<T>::value; + // true iff memset(slot, 0, size) constructs an unused slot value that is + // valid for Oilpan to trace and if the value needs destruction, its + // destructor can be invoked over. The zero'ed value representing an unused + // slot in the vector's backing storage; it does not have to be equal to + // what its constructor(s) would create, only be valid for those two uses. 
+ static const bool canClearUnusedSlotsWithMemset = + IsTriviallyDefaultConstructible<T>::value; + + static const bool canMoveWithMemcpy = IsTriviallyMoveAssignable<T>::value; + static const bool canCopyWithMemcpy = IsTriviallyCopyAssignable<T>::value; + static const bool canFillWithMemset = + IsTriviallyDefaultConstructible<T>::value && (sizeof(T) == sizeof(char)); + static const bool canCompareWithMemcmp = + std::is_scalar<T>::value; // Types without padding. + + // Supports swapping elements using regular std::swap semantics. + static const bool canSwapUsingCopyOrMove = true; + + template <typename U = void> + struct IsTraceableInCollection { + static const bool value = IsTraceable<T>::value; + }; + // We don't support weak handling in vectors. + static const WeakHandlingFlag weakHandlingFlag = NoWeakHandlingInCollections; +}; + +template <typename T> +struct VectorTraits : VectorTraitsBase<T> {}; + +// Classes marked with SimpleVectorTraits will use memmov, memcpy, memcmp +// instead of constructors, copy operators, etc for initialization, move and +// comparison. +template <typename T> +struct SimpleClassVectorTraits : VectorTraitsBase<T> { + static const bool canInitializeWithMemset = true; + static const bool canClearUnusedSlotsWithMemset = true; + static const bool canMoveWithMemcpy = true; + static const bool canCompareWithMemcmp = true; +}; + +// We know std::unique_ptr and RefPtr are simple enough that initializing to 0 +// and moving with memcpy (and then not destructing the original) will totally +// work. +template <typename P> +struct VectorTraits<RefPtr<P>> : SimpleClassVectorTraits<RefPtr<P>> {}; + +template <typename P> +struct VectorTraits<std::unique_ptr<P>> + : SimpleClassVectorTraits<std::unique_ptr<P>> { + // std::unique_ptr -> std::unique_ptr has a very particular structure that + // tricks the normal type traits into thinking that the class is "trivially + // copyable". 
+ static const bool canCopyWithMemcpy = false; +}; +static_assert(VectorTraits<RefPtr<int>>::canInitializeWithMemset, + "inefficient RefPtr Vector"); +static_assert(VectorTraits<RefPtr<int>>::canMoveWithMemcpy, + "inefficient RefPtr Vector"); +static_assert(VectorTraits<RefPtr<int>>::canCompareWithMemcmp, + "inefficient RefPtr Vector"); +static_assert(VectorTraits<std::unique_ptr<int>>::canInitializeWithMemset, + "inefficient std::unique_ptr Vector"); +static_assert(VectorTraits<std::unique_ptr<int>>::canMoveWithMemcpy, + "inefficient std::unique_ptr Vector"); +static_assert(VectorTraits<std::unique_ptr<int>>::canCompareWithMemcmp, + "inefficient std::unique_ptr Vector"); + +template <typename First, typename Second> +struct VectorTraits<std::pair<First, Second>> { + typedef VectorTraits<First> FirstTraits; + typedef VectorTraits<Second> SecondTraits; + + static const bool needsDestruction = + FirstTraits::needsDestruction || SecondTraits::needsDestruction; + static const bool canInitializeWithMemset = + FirstTraits::canInitializeWithMemset && + SecondTraits::canInitializeWithMemset; + static const bool canMoveWithMemcpy = + FirstTraits::canMoveWithMemcpy && SecondTraits::canMoveWithMemcpy; + static const bool canCopyWithMemcpy = + FirstTraits::canCopyWithMemcpy && SecondTraits::canCopyWithMemcpy; + static const bool canFillWithMemset = false; + static const bool canCompareWithMemcmp = + FirstTraits::canCompareWithMemcmp && SecondTraits::canCompareWithMemcmp; + static const bool canClearUnusedSlotsWithMemset = + FirstTraits::canClearUnusedSlotsWithMemset && + SecondTraits::canClearUnusedSlotsWithMemset; + // Supports swapping elements using regular std::swap semantics. 
+ static const bool canSwapUsingCopyOrMove = true; + template <typename U = void> + struct IsTraceableInCollection { + static const bool value = + IsTraceableInCollectionTrait<FirstTraits>::value || + IsTraceableInCollectionTrait<SecondTraits>::value; + }; + // We don't support weak handling in vectors. + static const WeakHandlingFlag weakHandlingFlag = NoWeakHandlingInCollections; +}; + +} // namespace WTF + +#define WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(ClassName) \ + namespace WTF { \ + static_assert(!IsTriviallyDefaultConstructible<ClassName>::value || \ + !IsTriviallyMoveAssignable<ClassName>::value || \ + !std::is_scalar<ClassName>::value, \ + "macro not needed"); \ + template <> \ + struct VectorTraits<ClassName> : SimpleClassVectorTraits<ClassName> {}; \ + } + +#define WTF_ALLOW_MOVE_AND_INIT_WITH_MEM_FUNCTIONS(ClassName) \ + namespace WTF { \ + static_assert(!IsTriviallyDefaultConstructible<ClassName>::value || \ + !IsTriviallyMoveAssignable<ClassName>::value, \ + "macro not needed"); \ + template <> \ + struct VectorTraits<ClassName> : VectorTraitsBase<ClassName> { \ + static const bool canInitializeWithMemset = true; \ + static const bool canClearUnusedSlotsWithMemset = true; \ + static const bool canMoveWithMemcpy = true; \ + }; \ + } + +#define WTF_ALLOW_INIT_WITH_MEM_FUNCTIONS(ClassName) \ + namespace WTF { \ + static_assert(!IsTriviallyDefaultConstructible<ClassName>::value, \ + "macro not needed"); \ + template <> \ + struct VectorTraits<ClassName> : VectorTraitsBase<ClassName> { \ + static const bool canInitializeWithMemset = true; \ + static const bool canClearUnusedSlotsWithMemset = true; \ + }; \ + } + +#define WTF_ALLOW_CLEAR_UNUSED_SLOTS_WITH_MEM_FUNCTIONS(ClassName) \ + namespace WTF { \ + static_assert(!IsTriviallyDefaultConstructible<ClassName>::value, \ + "macro not needed"); \ + template <> \ + struct VectorTraits<ClassName> : VectorTraitsBase<ClassName> { \ + static const bool canClearUnusedSlotsWithMemset = true; \ + }; \ + 
} + +using WTF::VectorTraits; +using WTF::SimpleClassVectorTraits; + +#endif // WTF_VectorTraits_h
diff --git a/third_party/WebKit/Source/wtf/BUILD.gn b/third_party/WebKit/Source/wtf/BUILD.gn index fb01a91..9abc61de 100644 --- a/third_party/WebKit/Source/wtf/BUILD.gn +++ b/third_party/WebKit/Source/wtf/BUILD.gn
@@ -25,7 +25,6 @@ "Assertions.h", "Atomics.h", "AutoReset.h", - "BitVector.cpp", "BitVector.h", "BitwiseOperations.h", "BloomFilter.h", @@ -38,7 +37,6 @@ "ContainerAnnotations.h", "CryptographicallyRandomNumber.h", "CurrentTime.h", - "DataLog.cpp", "DataLog.h", "DateMath.cpp", "DateMath.h", @@ -55,7 +53,6 @@ "HashIterators.h", "HashMap.h", "HashSet.h", - "HashTable.cpp", "HashTable.h", "HashTableDeletedValueType.h", "HashTraits.h",
diff --git a/third_party/WebKit/Source/wtf/BitVector.h b/third_party/WebKit/Source/wtf/BitVector.h index dd0b2aa..b041aa3 100644 --- a/third_party/WebKit/Source/wtf/BitVector.h +++ b/third_party/WebKit/Source/wtf/BitVector.h
@@ -1,227 +1,9 @@ -/* - * Copyright (C) 2011 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef BitVector_h -#define BitVector_h +#include "platform/wtf/BitVector.h" -#include "wtf/Allocator.h" -#include "wtf/Assertions.h" -#include "wtf/StdLibExtras.h" -#include "wtf/WTFExport.h" - -namespace WTF { - -class PrintStream; - -// This is a space-efficient, resizeable bitvector class. 
In the common case it -// occupies one word, but if necessary, it will inflate this one word to point -// to a single chunk of out-of-line allocated storage to store an arbitrary -// number of bits. -// -// - The bitvector remembers the bound of how many bits can be stored, but this -// may be slightly greater (by as much as some platform-specific constant) -// than the last argument passed to ensureSize(). -// -// - The bitvector can resize itself automatically (set, clear, get) or can be -// used in a manual mode, which is faster (quickSet, quickClear, quickGet, -// ensureSize). -// -// - Accesses assert that you are within bounds. -// -// - Bits are automatically initialized to zero. -// -// On the other hand, this BitVector class may not be the fastest around, since -// it does conditionals on every get/set/clear. But it is great if you need to -// juggle a lot of variable-length BitVectors and you're worried about wasting -// space. - -class WTF_EXPORT BitVector { - DISALLOW_NEW(); - - public: - BitVector() : m_bitsOrPointer(makeInlineBits(0)) {} - - explicit BitVector(size_t numBits) : m_bitsOrPointer(makeInlineBits(0)) { - ensureSize(numBits); - } - - BitVector(const BitVector& other) : m_bitsOrPointer(makeInlineBits(0)) { - (*this) = other; - } - - ~BitVector() { - if (isInline()) - return; - OutOfLineBits::destroy(outOfLineBits()); - } - - BitVector& operator=(const BitVector& other) { - if (isInline() && other.isInline()) - m_bitsOrPointer = other.m_bitsOrPointer; - else - setSlow(other); - return *this; - } - - size_t size() const { - if (isInline()) - return maxInlineBits(); - return outOfLineBits()->numBits(); - } - - void ensureSize(size_t numBits) { - if (numBits <= size()) - return; - resizeOutOfLine(numBits); - } - - // Like ensureSize(), but supports reducing the size of the bitvector. 
- void resize(size_t numBits); - - void clearAll(); - - bool quickGet(size_t bit) const { - SECURITY_CHECK(bit < size()); - return !!(bits()[bit / bitsInPointer()] & - (static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1)))); - } - - void quickSet(size_t bit) { - SECURITY_CHECK(bit < size()); - bits()[bit / bitsInPointer()] |= - (static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1))); - } - - void quickClear(size_t bit) { - SECURITY_CHECK(bit < size()); - bits()[bit / bitsInPointer()] &= - ~(static_cast<uintptr_t>(1) << (bit & (bitsInPointer() - 1))); - } - - void quickSet(size_t bit, bool value) { - if (value) - quickSet(bit); - else - quickClear(bit); - } - - bool get(size_t bit) const { - if (bit >= size()) - return false; - return quickGet(bit); - } - - void set(size_t bit) { - ensureSize(bit + 1); - quickSet(bit); - } - - void ensureSizeAndSet(size_t bit, size_t size) { - ensureSize(size); - quickSet(bit); - } - - void clear(size_t bit) { - if (bit >= size()) - return; - quickClear(bit); - } - - void set(size_t bit, bool value) { - if (value) - set(bit); - else - clear(bit); - } - - void dump(PrintStream& out); - - private: - static unsigned bitsInPointer() { return sizeof(void*) << 3; } - - static unsigned maxInlineBits() { return bitsInPointer() - 1; } - - static size_t byteCount(size_t bitCount) { return (bitCount + 7) >> 3; } - - static uintptr_t makeInlineBits(uintptr_t bits) { - DCHECK(!(bits & (static_cast<uintptr_t>(1) << maxInlineBits()))); - return bits | (static_cast<uintptr_t>(1) << maxInlineBits()); - } - - class WTF_EXPORT OutOfLineBits { - DISALLOW_NEW_EXCEPT_PLACEMENT_NEW(); - - public: - size_t numBits() const { return m_numBits; } - size_t numWords() const { - return (m_numBits + bitsInPointer() - 1) / bitsInPointer(); - } - uintptr_t* bits() { return bitwiseCast<uintptr_t*>(this + 1); } - const uintptr_t* bits() const { - return bitwiseCast<const uintptr_t*>(this + 1); - } - - static OutOfLineBits* create(size_t numBits); - - 
static void destroy(OutOfLineBits*); - - private: - OutOfLineBits(size_t numBits) : m_numBits(numBits) {} - - size_t m_numBits; - }; - - bool isInline() const { return m_bitsOrPointer >> maxInlineBits(); } - - const OutOfLineBits* outOfLineBits() const { - return bitwiseCast<const OutOfLineBits*>(m_bitsOrPointer << 1); - } - OutOfLineBits* outOfLineBits() { - return bitwiseCast<OutOfLineBits*>(m_bitsOrPointer << 1); - } - - void resizeOutOfLine(size_t numBits); - void setSlow(const BitVector& other); - - uintptr_t* bits() { - if (isInline()) - return &m_bitsOrPointer; - return outOfLineBits()->bits(); - } - - const uintptr_t* bits() const { - if (isInline()) - return &m_bitsOrPointer; - return outOfLineBits()->bits(); - } - - uintptr_t m_bitsOrPointer; -}; - -} // namespace WTF - -using WTF::BitVector; - -#endif // BitVector_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/DataLog.h b/third_party/WebKit/Source/wtf/DataLog.h index 63dd77f..300ecf4 100644 --- a/third_party/WebKit/Source/wtf/DataLog.h +++ b/third_party/WebKit/Source/wtf/DataLog.h
@@ -1,54 +1,9 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef DataLog_h -#define DataLog_h +#include "platform/wtf/DataLog.h" -#include "wtf/Assertions.h" -#include "wtf/Compiler.h" -#include "wtf/FilePrintStream.h" -#include "wtf/WTFExport.h" - -#include <stdarg.h> -#include <stdio.h> - -namespace WTF { - -FilePrintStream& dataFile(); - -WTF_EXPORT PRINTF_FORMAT(1, 0) void dataLogFV(const char* format, va_list); -WTF_EXPORT PRINTF_FORMAT(1, 2) void dataLogF(const char* format, ...); - -template <typename... 
T> -void dataLog(const T&... values) { - dataFile().print(values...); -} - -} // namespace WTF - -using WTF::dataLog; -using WTF::dataLogF; - -#endif // DataLog_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/Deque.h b/third_party/WebKit/Source/wtf/Deque.h index bb7aa0a7..d33a8a0 100644 --- a/third_party/WebKit/Source/wtf/Deque.h +++ b/third_party/WebKit/Source/wtf/Deque.h
@@ -1,706 +1,9 @@ -/* - * Copyright (C) 2007, 2008 Apple Inc. All rights reserved. - * Copyright (C) 2009 Google Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of - * its contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_Deque_h -#define WTF_Deque_h +#include "platform/wtf/Deque.h" -// FIXME: Could move what Vector and Deque share into a separate file. 
-// Deque doesn't actually use Vector. - -#include "wtf/Vector.h" -#include <iterator> - -namespace WTF { - -template <typename T, size_t inlineCapacity, typename Allocator> -class DequeIteratorBase; -template <typename T, size_t inlineCapacity, typename Allocator> -class DequeIterator; -template <typename T, size_t inlineCapacity, typename Allocator> -class DequeConstIterator; - -template <typename T, - size_t inlineCapacity = 0, - typename Allocator = PartitionAllocator> -class Deque : public ConditionalDestructor<Deque<T, INLINE_CAPACITY, Allocator>, - (INLINE_CAPACITY == 0) && - Allocator::isGarbageCollected> { - USE_ALLOCATOR(Deque, Allocator); - - public: - typedef DequeIterator<T, inlineCapacity, Allocator> iterator; - typedef DequeConstIterator<T, inlineCapacity, Allocator> const_iterator; - typedef std::reverse_iterator<iterator> reverse_iterator; - typedef std::reverse_iterator<const_iterator> const_reverse_iterator; - - Deque(); - Deque(const Deque&); - Deque& operator=(const Deque&); - Deque(Deque&&); - Deque& operator=(Deque&&); - - void finalize(); - void finalizeGarbageCollectedObject() { finalize(); } - - void swap(Deque&); - - size_t size() const { - return m_start <= m_end ? 
m_end - m_start - : m_end + m_buffer.capacity() - m_start; - } - bool isEmpty() const { return m_start == m_end; } - - iterator begin() { return iterator(this, m_start); } - iterator end() { return iterator(this, m_end); } - const_iterator begin() const { return const_iterator(this, m_start); } - const_iterator end() const { return const_iterator(this, m_end); } - reverse_iterator rbegin() { return reverse_iterator(end()); } - reverse_iterator rend() { return reverse_iterator(begin()); } - const_reverse_iterator rbegin() const { - return const_reverse_iterator(end()); - } - const_reverse_iterator rend() const { - return const_reverse_iterator(begin()); - } - - T& front() { - DCHECK_NE(m_start, m_end); - return m_buffer.buffer()[m_start]; - } - const T& front() const { - DCHECK_NE(m_start, m_end); - return m_buffer.buffer()[m_start]; - } - T takeFirst(); - - T& back() { - DCHECK_NE(m_start, m_end); - return *(--end()); - } - const T& back() const { - DCHECK_NE(m_start, m_end); - return *(--end()); - } - T takeLast(); - - T& at(size_t i) { - RELEASE_ASSERT(i < size()); - size_t right = m_buffer.capacity() - m_start; - return i < right ? m_buffer.buffer()[m_start + i] - : m_buffer.buffer()[i - right]; - } - const T& at(size_t i) const { - RELEASE_ASSERT(i < size()); - size_t right = m_buffer.capacity() - m_start; - return i < right ? m_buffer.buffer()[m_start + i] - : m_buffer.buffer()[i - right]; - } - - T& operator[](size_t i) { return at(i); } - const T& operator[](size_t i) const { return at(i); } - - template <typename U> - void push_front(U&&); - void erase(iterator&); - void erase(const_iterator&); - - // STL compatibility. - template <typename U> - void push_back(U&&); - void pop_back(); - void pop_front(); - bool empty() const { return isEmpty(); } - template <typename... Args> - void emplace_back(Args&&...); - template <typename... 
Args> - void emplace_front(Args&&...); - - void clear(); - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher); - - static_assert(!std::is_polymorphic<T>::value || - !VectorTraits<T>::canInitializeWithMemset, - "Cannot initialize with memset if there is a vtable"); - static_assert(Allocator::isGarbageCollected || - !AllowsOnlyPlacementNew<T>::value || - !IsTraceable<T>::value, - "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that " - "have trace methods into an off-heap Deque"); - static_assert(Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<T>::value, - "Cannot put raw pointers to garbage-collected classes into a " - "Deque. Use HeapDeque<Member<T>> instead."); - - private: - friend class DequeIteratorBase<T, inlineCapacity, Allocator>; - - class BackingBuffer : public VectorBuffer<T, INLINE_CAPACITY, Allocator> { - WTF_MAKE_NONCOPYABLE(BackingBuffer); - - private: - using Base = VectorBuffer<T, INLINE_CAPACITY, Allocator>; - using Base::m_size; - - public: - BackingBuffer() : Base() {} - explicit BackingBuffer(size_t capacity) : Base(capacity) {} - - void setSize(size_t size) { m_size = size; } - }; - - typedef VectorTypeOperations<T> TypeOperations; - typedef DequeIteratorBase<T, inlineCapacity, Allocator> IteratorBase; - - void erase(size_t position); - void destroyAll(); - void expandCapacityIfNeeded(); - void expandCapacity(); - - BackingBuffer m_buffer; - unsigned m_start; - unsigned m_end; -}; - -template <typename T, size_t inlineCapacity, typename Allocator> -class DequeIteratorBase { - DISALLOW_NEW(); - - protected: - DequeIteratorBase(); - DequeIteratorBase(const Deque<T, inlineCapacity, Allocator>*, size_t); - DequeIteratorBase(const DequeIteratorBase&); - DequeIteratorBase& operator=(const DequeIteratorBase<T, 0, Allocator>&); - ~DequeIteratorBase(); - - void assign(const DequeIteratorBase& other) { *this = other; } - - void increment(); - void decrement(); - - T* before() const; - T* after() const; - - 
bool isEqual(const DequeIteratorBase&) const; - - private: - Deque<T, inlineCapacity, Allocator>* m_deque; - unsigned m_index; - - friend class Deque<T, inlineCapacity, Allocator>; -}; - -template <typename T, - size_t inlineCapacity = 0, - typename Allocator = PartitionAllocator> -class DequeIterator : public DequeIteratorBase<T, inlineCapacity, Allocator> { - private: - typedef DequeIteratorBase<T, inlineCapacity, Allocator> Base; - typedef DequeIterator<T, inlineCapacity, Allocator> Iterator; - - public: - typedef ptrdiff_t difference_type; - typedef T value_type; - typedef T* pointer; - typedef T& reference; - typedef std::bidirectional_iterator_tag iterator_category; - - DequeIterator(Deque<T, inlineCapacity, Allocator>* deque, size_t index) - : Base(deque, index) {} - - DequeIterator(const Iterator& other) : Base(other) {} - DequeIterator& operator=(const Iterator& other) { - Base::assign(other); - return *this; - } - - T& operator*() const { return *Base::after(); } - T* operator->() const { return Base::after(); } - - bool operator==(const Iterator& other) const { return Base::isEqual(other); } - bool operator!=(const Iterator& other) const { return !Base::isEqual(other); } - - Iterator& operator++() { - Base::increment(); - return *this; - } - // postfix ++ intentionally omitted - Iterator& operator--() { - Base::decrement(); - return *this; - } - // postfix -- intentionally omitted -}; - -template <typename T, - size_t inlineCapacity = 0, - typename Allocator = PartitionAllocator> -class DequeConstIterator - : public DequeIteratorBase<T, inlineCapacity, Allocator> { - private: - typedef DequeIteratorBase<T, inlineCapacity, Allocator> Base; - typedef DequeConstIterator<T, inlineCapacity, Allocator> Iterator; - typedef DequeIterator<T, inlineCapacity, Allocator> NonConstIterator; - - public: - typedef ptrdiff_t difference_type; - typedef T value_type; - typedef const T* pointer; - typedef const T& reference; - typedef std::bidirectional_iterator_tag 
iterator_category; - - DequeConstIterator(const Deque<T, inlineCapacity, Allocator>* deque, - size_t index) - : Base(deque, index) {} - - DequeConstIterator(const Iterator& other) : Base(other) {} - DequeConstIterator(const NonConstIterator& other) : Base(other) {} - DequeConstIterator& operator=(const Iterator& other) { - Base::assign(other); - return *this; - } - DequeConstIterator& operator=(const NonConstIterator& other) { - Base::assign(other); - return *this; - } - - const T& operator*() const { return *Base::after(); } - const T* operator->() const { return Base::after(); } - - bool operator==(const Iterator& other) const { return Base::isEqual(other); } - bool operator!=(const Iterator& other) const { return !Base::isEqual(other); } - - Iterator& operator++() { - Base::increment(); - return *this; - } - // postfix ++ intentionally omitted - Iterator& operator--() { - Base::decrement(); - return *this; - } - // postfix -- intentionally omitted -}; - -template <typename T, size_t inlineCapacity, typename Allocator> -inline Deque<T, inlineCapacity, Allocator>::Deque() : m_start(0), m_end(0) {} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline Deque<T, inlineCapacity, Allocator>::Deque(const Deque& other) - : m_buffer(other.m_buffer.capacity()), - m_start(other.m_start), - m_end(other.m_end) { - const T* otherBuffer = other.m_buffer.buffer(); - if (m_start <= m_end) { - TypeOperations::uninitializedCopy(otherBuffer + m_start, - otherBuffer + m_end, - m_buffer.buffer() + m_start); - } else { - TypeOperations::uninitializedCopy(otherBuffer, otherBuffer + m_end, - m_buffer.buffer()); - TypeOperations::uninitializedCopy(otherBuffer + m_start, - otherBuffer + m_buffer.capacity(), - m_buffer.buffer() + m_start); - } -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline Deque<T, inlineCapacity, Allocator>& -Deque<T, inlineCapacity, Allocator>::operator=(const Deque& other) { - Deque<T> copy(other); - swap(copy); - 
return *this; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline Deque<T, inlineCapacity, Allocator>::Deque(Deque&& other) - : m_start(0), m_end(0) { - swap(other); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline Deque<T, inlineCapacity, Allocator>& -Deque<T, inlineCapacity, Allocator>::operator=(Deque&& other) { - swap(other); - return *this; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::destroyAll() { - if (m_start <= m_end) { - TypeOperations::destruct(m_buffer.buffer() + m_start, - m_buffer.buffer() + m_end); - m_buffer.clearUnusedSlots(m_buffer.buffer() + m_start, - m_buffer.buffer() + m_end); - } else { - TypeOperations::destruct(m_buffer.buffer(), m_buffer.buffer() + m_end); - m_buffer.clearUnusedSlots(m_buffer.buffer(), m_buffer.buffer() + m_end); - TypeOperations::destruct(m_buffer.buffer() + m_start, - m_buffer.buffer() + m_buffer.capacity()); - m_buffer.clearUnusedSlots(m_buffer.buffer() + m_start, - m_buffer.buffer() + m_buffer.capacity()); - } -} - -// Off-GC-heap deques: Destructor should be called. -// On-GC-heap deques: Destructor should be called for inline buffers (if any) -// but destructor shouldn't be called for vector backing since it is managed by -// the traced GC heap. 
-template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::finalize() { - if (!INLINE_CAPACITY && !m_buffer.buffer()) - return; - if (!isEmpty() && - !(Allocator::isGarbageCollected && m_buffer.hasOutOfLineBuffer())) - destroyAll(); - - m_buffer.destruct(); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::swap(Deque& other) { - typename BackingBuffer::OffsetRange thisHole; - if (m_start <= m_end) { - m_buffer.setSize(m_end); - thisHole.begin = 0; - thisHole.end = m_start; - } else { - m_buffer.setSize(m_buffer.capacity()); - thisHole.begin = m_end; - thisHole.end = m_start; - } - typename BackingBuffer::OffsetRange otherHole; - if (other.m_start <= other.m_end) { - other.m_buffer.setSize(other.m_end); - otherHole.begin = 0; - otherHole.end = other.m_start; - } else { - other.m_buffer.setSize(other.m_buffer.capacity()); - otherHole.begin = other.m_end; - otherHole.end = other.m_start; - } - - m_buffer.swapVectorBuffer(other.m_buffer, thisHole, otherHole); - - std::swap(m_start, other.m_start); - std::swap(m_end, other.m_end); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::clear() { - destroyAll(); - m_start = 0; - m_end = 0; - m_buffer.deallocateBuffer(m_buffer.buffer()); - m_buffer.resetBufferPointer(); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::expandCapacityIfNeeded() { - if (m_start) { - if (m_end + 1 != m_start) - return; - } else if (m_end) { - if (m_end != m_buffer.capacity() - 1) - return; - } else if (m_buffer.capacity()) { - return; - } - - expandCapacity(); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -void Deque<T, inlineCapacity, Allocator>::expandCapacity() { - size_t oldCapacity = m_buffer.capacity(); - T* oldBuffer = m_buffer.buffer(); - size_t 
newCapacity = - std::max(static_cast<size_t>(16), oldCapacity + oldCapacity / 4 + 1); - if (m_buffer.expandBuffer(newCapacity)) { - if (m_start <= m_end) { - // No adjustments to be done. - } else { - size_t newStart = m_buffer.capacity() - (oldCapacity - m_start); - TypeOperations::moveOverlapping(oldBuffer + m_start, - oldBuffer + oldCapacity, - m_buffer.buffer() + newStart); - m_buffer.clearUnusedSlots(oldBuffer + m_start, - oldBuffer + std::min(oldCapacity, newStart)); - m_start = newStart; - } - return; - } - m_buffer.allocateBuffer(newCapacity); - if (m_start <= m_end) { - TypeOperations::move(oldBuffer + m_start, oldBuffer + m_end, - m_buffer.buffer() + m_start); - m_buffer.clearUnusedSlots(oldBuffer + m_start, oldBuffer + m_end); - } else { - TypeOperations::move(oldBuffer, oldBuffer + m_end, m_buffer.buffer()); - m_buffer.clearUnusedSlots(oldBuffer, oldBuffer + m_end); - size_t newStart = m_buffer.capacity() - (oldCapacity - m_start); - TypeOperations::move(oldBuffer + m_start, oldBuffer + oldCapacity, - m_buffer.buffer() + newStart); - m_buffer.clearUnusedSlots(oldBuffer + m_start, oldBuffer + oldCapacity); - m_start = newStart; - } - m_buffer.deallocateBuffer(oldBuffer); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline T Deque<T, inlineCapacity, Allocator>::takeFirst() { - T oldFirst = std::move(front()); - pop_front(); - return oldFirst; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline T Deque<T, inlineCapacity, Allocator>::takeLast() { - T oldLast = std::move(back()); - pop_back(); - return oldLast; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -inline void Deque<T, inlineCapacity, Allocator>::push_back(U&& value) { - expandCapacityIfNeeded(); - T* newElement = &m_buffer.buffer()[m_end]; - if (m_end == m_buffer.capacity() - 1) - m_end = 0; - else - ++m_end; - new (NotNull, newElement) T(std::forward<U>(value)); -} - -template <typename T, size_t 
inlineCapacity, typename Allocator> -template <typename U> -inline void Deque<T, inlineCapacity, Allocator>::push_front(U&& value) { - expandCapacityIfNeeded(); - if (!m_start) - m_start = m_buffer.capacity() - 1; - else - --m_start; - new (NotNull, &m_buffer.buffer()[m_start]) T(std::forward<U>(value)); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename... Args> -inline void Deque<T, inlineCapacity, Allocator>::emplace_back(Args&&... args) { - expandCapacityIfNeeded(); - T* newElement = &m_buffer.buffer()[m_end]; - if (m_end == m_buffer.capacity() - 1) - m_end = 0; - else - ++m_end; - new (NotNull, newElement) T(std::forward<Args>(args)...); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename... Args> -inline void Deque<T, inlineCapacity, Allocator>::emplace_front(Args&&... args) { - expandCapacityIfNeeded(); - if (!m_start) - m_start = m_buffer.capacity() - 1; - else - --m_start; - new (NotNull, &m_buffer.buffer()[m_start]) T(std::forward<Args>(args)...); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::pop_front() { - DCHECK(!isEmpty()); - TypeOperations::destruct(&m_buffer.buffer()[m_start], - &m_buffer.buffer()[m_start + 1]); - m_buffer.clearUnusedSlots(&m_buffer.buffer()[m_start], - &m_buffer.buffer()[m_start + 1]); - if (m_start == m_buffer.capacity() - 1) - m_start = 0; - else - ++m_start; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::pop_back() { - DCHECK(!isEmpty()); - if (!m_end) - m_end = m_buffer.capacity() - 1; - else - --m_end; - TypeOperations::destruct(&m_buffer.buffer()[m_end], - &m_buffer.buffer()[m_end + 1]); - m_buffer.clearUnusedSlots(&m_buffer.buffer()[m_end], - &m_buffer.buffer()[m_end + 1]); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, 
Allocator>::erase(iterator& it) { - erase(it.m_index); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::erase(const_iterator& it) { - erase(it.m_index); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Deque<T, inlineCapacity, Allocator>::erase(size_t position) { - if (position == m_end) - return; - - T* buffer = m_buffer.buffer(); - TypeOperations::destruct(&buffer[position], &buffer[position + 1]); - - // Find which segment of the circular buffer contained the remove element, - // and only move elements in that part. - if (position >= m_start) { - TypeOperations::moveOverlapping(buffer + m_start, buffer + position, - buffer + m_start + 1); - m_buffer.clearUnusedSlots(buffer + m_start, buffer + m_start + 1); - m_start = (m_start + 1) % m_buffer.capacity(); - } else { - TypeOperations::moveOverlapping(buffer + position + 1, buffer + m_end, - buffer + position); - m_buffer.clearUnusedSlots(buffer + m_end - 1, buffer + m_end); - m_end = (m_end - 1 + m_buffer.capacity()) % m_buffer.capacity(); - } -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline DequeIteratorBase<T, inlineCapacity, Allocator>::DequeIteratorBase() - : m_deque(0) {} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline DequeIteratorBase<T, inlineCapacity, Allocator>::DequeIteratorBase( - const Deque<T, inlineCapacity, Allocator>* deque, - size_t index) - : m_deque(const_cast<Deque<T, inlineCapacity, Allocator>*>(deque)), - m_index(index) {} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline DequeIteratorBase<T, inlineCapacity, Allocator>::DequeIteratorBase( - const DequeIteratorBase& other) - : m_deque(other.m_deque), m_index(other.m_index) {} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline DequeIteratorBase<T, inlineCapacity, Allocator>& -DequeIteratorBase<T, inlineCapacity, 
Allocator>::operator=( - const DequeIteratorBase<T, 0, Allocator>& other) { - m_deque = other.m_deque; - m_index = other.m_index; - return *this; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline DequeIteratorBase<T, inlineCapacity, Allocator>::~DequeIteratorBase() {} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline bool DequeIteratorBase<T, inlineCapacity, Allocator>::isEqual( - const DequeIteratorBase& other) const { - return m_index == other.m_index; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void DequeIteratorBase<T, inlineCapacity, Allocator>::increment() { - DCHECK_NE(m_index, m_deque->m_end); - DCHECK(m_deque->m_buffer.capacity()); - if (m_index == m_deque->m_buffer.capacity() - 1) - m_index = 0; - else - ++m_index; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void DequeIteratorBase<T, inlineCapacity, Allocator>::decrement() { - DCHECK_NE(m_index, m_deque->m_start); - DCHECK(m_deque->m_buffer.capacity()); - if (!m_index) - m_index = m_deque->m_buffer.capacity() - 1; - else - --m_index; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline T* DequeIteratorBase<T, inlineCapacity, Allocator>::after() const { - RELEASE_ASSERT(m_index != m_deque->m_end); - return &m_deque->m_buffer.buffer()[m_index]; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline T* DequeIteratorBase<T, inlineCapacity, Allocator>::before() const { - RELEASE_ASSERT(m_index != m_deque->m_start); - if (!m_index) - return &m_deque->m_buffer.buffer()[m_deque->m_buffer.capacity() - 1]; - return &m_deque->m_buffer.buffer()[m_index - 1]; -} - -// This is only called if the allocator is a HeapAllocator. It is used when -// visiting during a tracing GC. 
-template <typename T, size_t inlineCapacity, typename Allocator> -template <typename VisitorDispatcher> -void Deque<T, inlineCapacity, Allocator>::trace(VisitorDispatcher visitor) { - DCHECK(Allocator::isGarbageCollected) << "Garbage collector must be enabled."; - const T* bufferBegin = m_buffer.buffer(); - const T* end = bufferBegin + m_end; - if (IsTraceableInCollectionTrait<VectorTraits<T>>::value) { - if (m_start <= m_end) { - for (const T* bufferEntry = bufferBegin + m_start; bufferEntry != end; - bufferEntry++) - Allocator::template trace<VisitorDispatcher, T, VectorTraits<T>>( - visitor, *const_cast<T*>(bufferEntry)); - } else { - for (const T* bufferEntry = bufferBegin; bufferEntry != end; - bufferEntry++) - Allocator::template trace<VisitorDispatcher, T, VectorTraits<T>>( - visitor, *const_cast<T*>(bufferEntry)); - const T* bufferEnd = m_buffer.buffer() + m_buffer.capacity(); - for (const T* bufferEntry = bufferBegin + m_start; - bufferEntry != bufferEnd; bufferEntry++) - Allocator::template trace<VisitorDispatcher, T, VectorTraits<T>>( - visitor, *const_cast<T*>(bufferEntry)); - } - } - if (m_buffer.hasOutOfLineBuffer()) { - Allocator::markNoTracing(visitor, m_buffer.buffer()); - Allocator::registerBackingStoreReference(visitor, m_buffer.bufferSlot()); - } -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void swap(Deque<T, inlineCapacity, Allocator>& a, - Deque<T, inlineCapacity, Allocator>& b) { - a.swap(b); -} - -} // namespace WTF - -using WTF::Deque; - -#endif // WTF_Deque_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/DoublyLinkedList.h b/third_party/WebKit/Source/wtf/DoublyLinkedList.h index 16e0d411..39fd5ad 100644 --- a/third_party/WebKit/Source/wtf/DoublyLinkedList.h +++ b/third_party/WebKit/Source/wtf/DoublyLinkedList.h
@@ -1,197 +1,9 @@ -/* - * Copyright (C) 2011 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' - * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, - * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF - * THE POSSIBILITY OF SUCH DAMAGE. - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef DoublyLinkedList_h -#define DoublyLinkedList_h +#include "platform/wtf/DoublyLinkedList.h" -#include "wtf/Allocator.h" - -namespace WTF { - -// This class allows nodes to share code without dictating data member layout. 
-template <typename T> -class DoublyLinkedListNode { - public: - DoublyLinkedListNode(); - - void setPrev(T*); - void setNext(T*); - - T* prev() const; - T* next() const; -}; - -template <typename T> -inline DoublyLinkedListNode<T>::DoublyLinkedListNode() { - setPrev(0); - setNext(0); -} - -template <typename T> -inline void DoublyLinkedListNode<T>::setPrev(T* prev) { - static_cast<T*>(this)->m_prev = prev; -} - -template <typename T> -inline void DoublyLinkedListNode<T>::setNext(T* next) { - static_cast<T*>(this)->m_next = next; -} - -template <typename T> -inline T* DoublyLinkedListNode<T>::prev() const { - return static_cast<const T*>(this)->m_prev; -} - -template <typename T> -inline T* DoublyLinkedListNode<T>::next() const { - return static_cast<const T*>(this)->m_next; -} - -template <typename T> -class DoublyLinkedList { - USING_FAST_MALLOC(DoublyLinkedList); - - public: - DoublyLinkedList(); - - bool isEmpty() const; - size_t size() const; // This is O(n). - void clear(); - - T* head() const; - T* removeHead(); - - T* tail() const; - - void push(T*); - void append(T*); - void remove(T*); - - private: - T* m_head; - T* m_tail; -}; - -template <typename T> -inline DoublyLinkedList<T>::DoublyLinkedList() : m_head(0), m_tail(0) {} - -template <typename T> -inline bool DoublyLinkedList<T>::isEmpty() const { - return !m_head; -} - -template <typename T> -inline size_t DoublyLinkedList<T>::size() const { - size_t size = 0; - for (T* node = m_head; node; node = node->next()) - ++size; - return size; -} - -template <typename T> -inline void DoublyLinkedList<T>::clear() { - m_head = 0; - m_tail = 0; -} - -template <typename T> -inline T* DoublyLinkedList<T>::head() const { - return m_head; -} - -template <typename T> -inline T* DoublyLinkedList<T>::tail() const { - return m_tail; -} - -template <typename T> -inline void DoublyLinkedList<T>::push(T* node) { - if (!m_head) { - DCHECK(!m_tail); - m_head = node; - m_tail = node; - node->setPrev(0); - node->setNext(0); - 
return; - } - - DCHECK(m_tail); - m_head->setPrev(node); - node->setNext(m_head); - node->setPrev(0); - m_head = node; -} - -template <typename T> -inline void DoublyLinkedList<T>::append(T* node) { - if (!m_tail) { - DCHECK(!m_head); - m_head = node; - m_tail = node; - node->setPrev(0); - node->setNext(0); - return; - } - - DCHECK(m_head); - m_tail->setNext(node); - node->setPrev(m_tail); - node->setNext(0); - m_tail = node; -} - -template <typename T> -inline void DoublyLinkedList<T>::remove(T* node) { - if (node->prev()) { - DCHECK_NE(node, m_head); - node->prev()->setNext(node->next()); - } else { - DCHECK_EQ(node, m_head); - m_head = node->next(); - } - - if (node->next()) { - DCHECK_NE(node, m_tail); - node->next()->setPrev(node->prev()); - } else { - DCHECK_EQ(node, m_tail); - m_tail = node->prev(); - } -} - -template <typename T> -inline T* DoublyLinkedList<T>::removeHead() { - T* node = head(); - if (node) - remove(node); - return node; -} - -} // namespace WTF - -using WTF::DoublyLinkedListNode; -using WTF::DoublyLinkedList; - -#endif +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/FilePrintStream.h b/third_party/WebKit/Source/wtf/FilePrintStream.h index 46af3de..56e5c54 100644 --- a/third_party/WebKit/Source/wtf/FilePrintStream.h +++ b/third_party/WebKit/Source/wtf/FilePrintStream.h
@@ -1,60 +1,9 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#ifndef FilePrintStream_h -#define FilePrintStream_h +#include "platform/wtf/FilePrintStream.h" -#include "wtf/Compiler.h" -#include "wtf/PrintStream.h" -#include <memory> -#include <stdio.h> - -namespace WTF { - -class WTF_EXPORT FilePrintStream final : public PrintStream { - public: - enum AdoptionMode { Adopt, Borrow }; - - FilePrintStream(FILE*, AdoptionMode = Adopt); - ~FilePrintStream() override; - - static std::unique_ptr<FilePrintStream> open(const char* filename, - const char* mode); - - FILE* file() { return m_file; } - - PRINTF_FORMAT(2, 0) void vprintf(const char* format, va_list) override; - void flush() override; - - private: - FILE* m_file; - AdoptionMode m_adoptionMode; -}; - -} // namespace WTF - -using WTF::FilePrintStream; - -#endif // FilePrintStream_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/HashCountedSet.h b/third_party/WebKit/Source/wtf/HashCountedSet.h index 6c3db07..39e01960 100644 --- a/third_party/WebKit/Source/wtf/HashCountedSet.h +++ b/third_party/WebKit/Source/wtf/HashCountedSet.h
@@ -1,188 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_HashCountedSet_h -#define WTF_HashCountedSet_h +#include "platform/wtf/HashCountedSet.h" -#include "wtf/Assertions.h" -#include "wtf/HashMap.h" -#include "wtf/Vector.h" -#include "wtf/allocator/PartitionAllocator.h" - -namespace WTF { - -// An unordered hash set that keeps track of how many times you added an item to -// the set. The iterators have fields ->key and ->value that return the set -// members and their counts, respectively. 
-template <typename Value, - typename HashFunctions = typename DefaultHash<Value>::Hash, - typename Traits = HashTraits<Value>, - typename Allocator = PartitionAllocator> -class HashCountedSet { - USE_ALLOCATOR(HashCountedSet, Allocator); - WTF_MAKE_NONCOPYABLE(HashCountedSet); - - private: - typedef HashMap<Value, - unsigned, - HashFunctions, - Traits, - HashTraits<unsigned>, - Allocator> - ImplType; - - public: - typedef Value ValueType; - using value_type = ValueType; - typedef typename ImplType::iterator iterator; - typedef typename ImplType::const_iterator const_iterator; - typedef typename ImplType::AddResult AddResult; - - HashCountedSet() { - static_assert(Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<Value>::value, - "Cannot put raw pointers to garbage-collected classes into " - "an off-heap HashCountedSet. Use " - "HeapHashCountedSet<Member<T>> instead."); - } - - void swap(HashCountedSet& other) { m_impl.swap(other.m_impl); } - - unsigned size() const { return m_impl.size(); } - unsigned capacity() const { return m_impl.capacity(); } - bool isEmpty() const { return m_impl.isEmpty(); } - - // Iterators iterate over pairs of values (called key) and counts (called - // value). - iterator begin() { return m_impl.begin(); } - iterator end() { return m_impl.end(); } - const_iterator begin() const { return m_impl.begin(); } - const_iterator end() const { return m_impl.end(); } - - iterator find(const ValueType& value) { return m_impl.find(value); } - const_iterator find(const ValueType& value) const { - return m_impl.find(value); - } - bool contains(const ValueType& value) const { return m_impl.contains(value); } - unsigned count(const ValueType& value) const { return m_impl.at(value); } - - // Increases the count if an equal value is already present the return value - // is a pair of an iterator to the new value's location, and a bool that is - // true if an new entry was added. 
- AddResult add(const ValueType&); - - // Generalized add(), adding the value N times. - AddResult add(const ValueType&, unsigned); - - // Reduces the count of the value, and removes it if count goes down to - // zero, returns true if the value is removed. - bool remove(const ValueType& value) { return remove(find(value)); } - bool remove(iterator); - - // Removes the value, regardless of its count. - void removeAll(const ValueType& value) { removeAll(find(value)); } - void removeAll(iterator); - - // Clears the whole set. - void clear() { m_impl.clear(); } - - Vector<Value> asVector() const; - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - m_impl.trace(visitor); - } - - private: - ImplType m_impl; -}; - -template <typename T, typename U, typename V, typename W> -inline typename HashCountedSet<T, U, V, W>::AddResult -HashCountedSet<T, U, V, W>::add(const ValueType& value, unsigned count) { - DCHECK_GT(count, 0u); - AddResult result = m_impl.insert(value, 0); - result.storedValue->value += count; - return result; -} - -template <typename T, typename U, typename V, typename W> -inline typename HashCountedSet<T, U, V, W>::AddResult -HashCountedSet<T, U, V, W>::add(const ValueType& value) { - return add(value, 1u); -} - -template <typename T, typename U, typename V, typename W> -inline bool HashCountedSet<T, U, V, W>::remove(iterator it) { - if (it == end()) - return false; - - unsigned oldVal = it->value; - DCHECK(oldVal); - unsigned newVal = oldVal - 1; - if (newVal) { - it->value = newVal; - return false; - } - - m_impl.erase(it); - return true; -} - -template <typename T, typename U, typename V, typename W> -inline void HashCountedSet<T, U, V, W>::removeAll(iterator it) { - if (it == end()) - return; - - m_impl.erase(it); -} - -template <typename Value, - typename HashFunctions, - typename Traits, - typename Allocator, - typename VectorType> -inline void copyToVector( - const HashCountedSet<Value, HashFunctions, Traits, 
Allocator>& collection, - VectorType& vector) { - { - // Disallow GC across resize allocation, see crbug.com/568173 - typename VectorType::GCForbiddenScope scope; - vector.resize(collection.size()); - } - - auto it = collection.begin(); - auto end = collection.end(); - for (unsigned i = 0; it != end; ++it, ++i) - vector[i] = (*it).key; -} - -template <typename T, typename U, typename V, typename W> -inline Vector<T> HashCountedSet<T, U, V, W>::asVector() const { - Vector<T> vector; - copyToVector(*this, vector); - return vector; -} - -} // namespace WTF - -using WTF::HashCountedSet; - -#endif // WTF_HashCountedSet_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/HashFunctions.h b/third_party/WebKit/Source/wtf/HashFunctions.h index c3d90d0..4872943 100644 --- a/third_party/WebKit/Source/wtf/HashFunctions.h +++ b/third_party/WebKit/Source/wtf/HashFunctions.h
@@ -1,291 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2008 Apple Inc. All rights reserved. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#ifndef WTF_HashFunctions_h -#define WTF_HashFunctions_h +#include "platform/wtf/HashFunctions.h" -#include "wtf/RefPtr.h" -#include "wtf/StdLibExtras.h" -#include <memory> -#include <stdint.h> -#include <type_traits> - -namespace WTF { - -template <size_t size> -struct IntTypes; -template <> -struct IntTypes<1> { - typedef int8_t SignedType; - typedef uint8_t UnsignedType; -}; -template <> -struct IntTypes<2> { - typedef int16_t SignedType; - typedef uint16_t UnsignedType; -}; -template <> -struct IntTypes<4> { - typedef int32_t SignedType; - typedef uint32_t UnsignedType; -}; -template <> -struct IntTypes<8> { - typedef int64_t SignedType; - typedef uint64_t UnsignedType; -}; - -// integer hash function - -// Thomas Wang's 32 Bit Mix Function: -// http://www.cris.com/~Ttwang/tech/inthash.htm -inline unsigned hashInt(uint8_t key8) { - unsigned key = key8; - key += ~(key << 15); - key ^= (key >> 10); - key += (key << 3); - key ^= (key >> 6); - key += ~(key << 11); - key ^= (key >> 16); - return key; -} - -// Thomas Wang's 32 Bit Mix Function: -// http://www.cris.com/~Ttwang/tech/inthash.htm -inline unsigned hashInt(uint16_t key16) { - unsigned key = key16; - key += ~(key << 15); - key ^= (key >> 10); - key += (key << 3); - key ^= (key >> 6); - key += ~(key << 11); - key ^= (key >> 16); - return key; -} - -// Thomas Wang's 32 Bit Mix Function: -// http://www.cris.com/~Ttwang/tech/inthash.htm -inline unsigned hashInt(uint32_t key) { - key += ~(key << 15); - key ^= (key >> 10); - key += (key << 3); - key ^= (key >> 6); - key += ~(key << 11); - key ^= (key >> 16); - return key; -} - -// Thomas Wang's 64 bit Mix Function: -// http://www.cris.com/~Ttwang/tech/inthash.htm -inline unsigned hashInt(uint64_t key) { - key += ~(key << 32); - key ^= (key >> 22); - key += ~(key << 13); - key ^= (key >> 8); - key += (key << 3); - key ^= (key >> 15); - key += ~(key << 27); - key ^= (key >> 31); - return static_cast<unsigned>(key); -} - -// Compound integer hash method: -// 
http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000 -inline unsigned hashInts(unsigned key1, unsigned key2) { - unsigned shortRandom1 = 277951225; // A random 32-bit value. - unsigned shortRandom2 = 95187966; // A random 32-bit value. - uint64_t longRandom = 19248658165952623LL; // A random, odd 64-bit value. - - uint64_t product = - longRandom * shortRandom1 * key1 + longRandom * shortRandom2 * key2; - unsigned highBits = static_cast<unsigned>( - product >> (8 * (sizeof(uint64_t) - sizeof(unsigned)))); - return highBits; -} - -template <typename T> -struct IntHash { - static unsigned hash(T key) { - return hashInt( - static_cast<typename IntTypes<sizeof(T)>::UnsignedType>(key)); - } - static bool equal(T a, T b) { return a == b; } - static const bool safeToCompareToEmptyOrDeleted = true; -}; - -template <typename T> -struct FloatHash { - typedef typename IntTypes<sizeof(T)>::UnsignedType Bits; - static unsigned hash(T key) { return hashInt(bitwiseCast<Bits>(key)); } - static bool equal(T a, T b) { - return bitwiseCast<Bits>(a) == bitwiseCast<Bits>(b); - } - static const bool safeToCompareToEmptyOrDeleted = true; -}; - -// pointer identity hash function - -template <typename T> -struct PtrHash { - static unsigned hash(T* key) { -#if COMPILER(MSVC) -#pragma warning(push) -// work around what seems to be a bug in MSVC's conversion warnings -#pragma warning(disable : 4244) -#endif - return IntHash<uintptr_t>::hash(reinterpret_cast<uintptr_t>(key)); -#if COMPILER(MSVC) -#pragma warning(pop) -#endif - } - static bool equal(T* a, T* b) { return a == b; } - static bool equal(std::nullptr_t, T* b) { return !b; } - static bool equal(T* a, std::nullptr_t) { return !a; } - static const bool safeToCompareToEmptyOrDeleted = true; -}; - -template <typename T> -struct RefPtrHash : PtrHash<T> { - using PtrHash<T>::hash; - static unsigned hash(const RefPtr<T>& key) { return hash(key.get()); } - static unsigned hash(const 
PassRefPtr<T>& key) { return hash(key.get()); } - using PtrHash<T>::equal; - static bool equal(const RefPtr<T>& a, const RefPtr<T>& b) { return a == b; } - static bool equal(T* a, const RefPtr<T>& b) { return a == b; } - static bool equal(const RefPtr<T>& a, T* b) { return a == b; } - static bool equal(const RefPtr<T>& a, const PassRefPtr<T>& b) { - return a == b; - } -}; - -template <typename T> -struct UniquePtrHash : PtrHash<T> { - using PtrHash<T>::hash; - static unsigned hash(const std::unique_ptr<T>& key) { - return hash(key.get()); - } - static bool equal(const std::unique_ptr<T>& a, const std::unique_ptr<T>& b) { - return a == b; - } - static bool equal(const std::unique_ptr<T>& a, const T* b) { - return a.get() == b; - } - static bool equal(const T* a, const std::unique_ptr<T>& b) { - return a == b.get(); - } -}; - -// Default hash function for each type. -template <typename T> -struct DefaultHash; - -// Actual implementation of DefaultHash. -// -// The case of |isIntegral| == false is not implemented. If you see a compile -// error saying DefaultHashImpl<T, false> is not defined, that's because the -// default hash functions for T are not defined. You need to implement them -// yourself. -template <typename T, bool isIntegral> -struct DefaultHashImpl; - -template <typename T> -struct DefaultHashImpl<T, true> { - using Hash = IntHash<typename std::make_unsigned<T>::type>; -}; - -// Canonical implementation of DefaultHash. -template <typename T> -struct DefaultHash : DefaultHashImpl<T, std::is_integral<T>::value> {}; - -// Specializations of DefaultHash follow. -template <> -struct DefaultHash<float> { - using Hash = FloatHash<float>; -}; -template <> -struct DefaultHash<double> { - using Hash = FloatHash<double>; -}; - -// Specializations for pointer types. 
-template <typename T> -struct DefaultHash<T*> { - using Hash = PtrHash<T>; -}; -template <typename T> -struct DefaultHash<RefPtr<T>> { - using Hash = RefPtrHash<T>; -}; -template <typename T> -struct DefaultHash<std::unique_ptr<T>> { - using Hash = UniquePtrHash<T>; -}; - -// Specializations for pairs. - -// Generic case (T or U is non-integral): -template <typename T, typename U, bool areBothIntegral> -struct PairHashImpl { - static unsigned hash(const std::pair<T, U>& p) { - return hashInts(DefaultHash<T>::Hash::hash(p.first), - DefaultHash<U>::Hash::hash(p.second)); - } - static bool equal(const std::pair<T, U>& a, const std::pair<T, U>& b) { - return DefaultHash<T>::Hash::equal(a.first, b.first) && - DefaultHash<U>::Hash::equal(a.second, b.second); - } - static const bool safeToCompareToEmptyOrDeleted = - DefaultHash<T>::Hash::safeToCompareToEmptyOrDeleted && - DefaultHash<U>::Hash::safeToCompareToEmptyOrDeleted; -}; - -// Special version for pairs of integrals: -template <typename T, typename U> -struct PairHashImpl<T, U, true> { - static unsigned hash(const std::pair<T, U>& p) { - return hashInts(p.first, p.second); - } - static bool equal(const std::pair<T, U>& a, const std::pair<T, U>& b) { - return PairHashImpl<T, U, false>::equal( - a, b); // Refer to the generic version. - } - static const bool safeToCompareToEmptyOrDeleted = - PairHashImpl<T, U, false>::safeToCompareToEmptyOrDeleted; -}; - -// Combined version: -template <typename T, typename U> -struct PairHash - : PairHashImpl<T, - U, - std::is_integral<T>::value && std::is_integral<U>::value> {}; - -template <typename T, typename U> -struct DefaultHash<std::pair<T, U>> { - using Hash = PairHash<T, U>; -}; - -} // namespace WTF - -using WTF::DefaultHash; -using WTF::IntHash; -using WTF::PtrHash; - -#endif // WTF_HashFunctions_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. 
See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/HashIterators.h b/third_party/WebKit/Source/wtf/HashIterators.h index 73b813d..2b79b27 100644 --- a/third_party/WebKit/Source/wtf/HashIterators.h +++ b/third_party/WebKit/Source/wtf/HashIterators.h
@@ -1,282 +1,9 @@ -/* - * Copyright (C) 2007 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#ifndef WTF_HashIterators_h -#define WTF_HashIterators_h +#include "platform/wtf/HashIterators.h" -#include "wtf/Allocator.h" - -namespace WTF { - -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableConstKeysIterator; -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableConstValuesIterator; -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableKeysIterator; -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableValuesIterator; - -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableConstIteratorAdapter<HashTableType, - KeyValuePair<KeyType, MappedType>> { - STACK_ALLOCATED(); - - private: - typedef KeyValuePair<KeyType, MappedType> ValueType; - - public: - typedef HashTableConstKeysIterator<HashTableType, KeyType, MappedType> - KeysIterator; - typedef HashTableConstValuesIterator<HashTableType, KeyType, MappedType> - ValuesIterator; - - HashTableConstIteratorAdapter() {} - HashTableConstIteratorAdapter( - const typename HashTableType::const_iterator& impl) - : m_impl(impl) {} - - const ValueType* get() const { return (const ValueType*)m_impl.get(); } - const ValueType& operator*() const { return *get(); } - const ValueType* operator->() const { return get(); } - - HashTableConstIteratorAdapter& operator++() { - ++m_impl; - return *this; - } - // postfix ++ intentionally omitted - - KeysIterator keys() { return KeysIterator(*this); } - ValuesIterator values() { return ValuesIterator(*this); } - - typename HashTableType::const_iterator m_impl; -}; - -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableIteratorAdapter<HashTableType, - KeyValuePair<KeyType, MappedType>> { - STACK_ALLOCATED(); - - private: - typedef KeyValuePair<KeyType, MappedType> ValueType; - - public: - typedef HashTableKeysIterator<HashTableType, KeyType, MappedType> - 
KeysIterator; - typedef HashTableValuesIterator<HashTableType, KeyType, MappedType> - ValuesIterator; - - HashTableIteratorAdapter() {} - HashTableIteratorAdapter(const typename HashTableType::iterator& impl) - : m_impl(impl) {} - - ValueType* get() const { return (ValueType*)m_impl.get(); } - ValueType& operator*() const { return *get(); } - ValueType* operator->() const { return get(); } - - HashTableIteratorAdapter& operator++() { - ++m_impl; - return *this; - } - // postfix ++ intentionally omitted - - operator HashTableConstIteratorAdapter<HashTableType, ValueType>() { - typename HashTableType::const_iterator i = m_impl; - return i; - } - - KeysIterator keys() { return KeysIterator(*this); } - ValuesIterator values() { return ValuesIterator(*this); } - - typename HashTableType::iterator m_impl; -}; - -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableConstKeysIterator { - STACK_ALLOCATED(); - - private: - typedef HashTableConstIteratorAdapter<HashTableType, - KeyValuePair<KeyType, MappedType>> - ConstIterator; - - public: - HashTableConstKeysIterator(const ConstIterator& impl) : m_impl(impl) {} - - const KeyType* get() const { return &(m_impl.get()->key); } - const KeyType& operator*() const { return *get(); } - const KeyType* operator->() const { return get(); } - - HashTableConstKeysIterator& operator++() { - ++m_impl; - return *this; - } - // postfix ++ intentionally omitted - - ConstIterator m_impl; -}; - -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableConstValuesIterator { - STACK_ALLOCATED(); - - private: - typedef HashTableConstIteratorAdapter<HashTableType, - KeyValuePair<KeyType, MappedType>> - ConstIterator; - - public: - HashTableConstValuesIterator(const ConstIterator& impl) : m_impl(impl) {} - - const MappedType* get() const { return &(m_impl.get()->value); } - const MappedType& operator*() const { return *get(); } - const MappedType* operator->() const { return 
get(); } - - HashTableConstValuesIterator& operator++() { - ++m_impl; - return *this; - } - // postfix ++ intentionally omitted - - ConstIterator m_impl; -}; - -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableKeysIterator { - STACK_ALLOCATED(); - - private: - typedef HashTableIteratorAdapter<HashTableType, - KeyValuePair<KeyType, MappedType>> - Iterator; - typedef HashTableConstIteratorAdapter<HashTableType, - KeyValuePair<KeyType, MappedType>> - ConstIterator; - - public: - HashTableKeysIterator(const Iterator& impl) : m_impl(impl) {} - - KeyType* get() const { return &(m_impl.get()->key); } - KeyType& operator*() const { return *get(); } - KeyType* operator->() const { return get(); } - - HashTableKeysIterator& operator++() { - ++m_impl; - return *this; - } - // postfix ++ intentionally omitted - - operator HashTableConstKeysIterator<HashTableType, KeyType, MappedType>() { - ConstIterator i = m_impl; - return i; - } - - Iterator m_impl; -}; - -template <typename HashTableType, typename KeyType, typename MappedType> -struct HashTableValuesIterator { - STACK_ALLOCATED(); - - private: - typedef HashTableIteratorAdapter<HashTableType, - KeyValuePair<KeyType, MappedType>> - Iterator; - typedef HashTableConstIteratorAdapter<HashTableType, - KeyValuePair<KeyType, MappedType>> - ConstIterator; - - public: - HashTableValuesIterator(const Iterator& impl) : m_impl(impl) {} - - MappedType* get() const { return &(m_impl.get()->value); } - MappedType& operator*() const { return *get(); } - MappedType* operator->() const { return get(); } - - HashTableValuesIterator& operator++() { - ++m_impl; - return *this; - } - // postfix ++ intentionally omitted - - operator HashTableConstValuesIterator<HashTableType, KeyType, MappedType>() { - ConstIterator i = m_impl; - return i; - } - - Iterator m_impl; -}; - -template <typename T, typename U, typename V> -inline bool operator==(const HashTableConstKeysIterator<T, U, V>& a, - const 
HashTableConstKeysIterator<T, U, V>& b) { - return a.m_impl == b.m_impl; -} - -template <typename T, typename U, typename V> -inline bool operator!=(const HashTableConstKeysIterator<T, U, V>& a, - const HashTableConstKeysIterator<T, U, V>& b) { - return a.m_impl != b.m_impl; -} - -template <typename T, typename U, typename V> -inline bool operator==(const HashTableConstValuesIterator<T, U, V>& a, - const HashTableConstValuesIterator<T, U, V>& b) { - return a.m_impl == b.m_impl; -} - -template <typename T, typename U, typename V> -inline bool operator!=(const HashTableConstValuesIterator<T, U, V>& a, - const HashTableConstValuesIterator<T, U, V>& b) { - return a.m_impl != b.m_impl; -} - -template <typename T, typename U, typename V> -inline bool operator==(const HashTableKeysIterator<T, U, V>& a, - const HashTableKeysIterator<T, U, V>& b) { - return a.m_impl == b.m_impl; -} - -template <typename T, typename U, typename V> -inline bool operator!=(const HashTableKeysIterator<T, U, V>& a, - const HashTableKeysIterator<T, U, V>& b) { - return a.m_impl != b.m_impl; -} - -template <typename T, typename U, typename V> -inline bool operator==(const HashTableValuesIterator<T, U, V>& a, - const HashTableValuesIterator<T, U, V>& b) { - return a.m_impl == b.m_impl; -} - -template <typename T, typename U, typename V> -inline bool operator!=(const HashTableValuesIterator<T, U, V>& a, - const HashTableValuesIterator<T, U, V>& b) { - return a.m_impl != b.m_impl; -} - -} // namespace WTF - -#endif // WTF_HashIterators_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/HashMap.h b/third_party/WebKit/Source/wtf/HashMap.h index d111458b..525c4f26 100644 --- a/third_party/WebKit/Source/wtf/HashMap.h +++ b/third_party/WebKit/Source/wtf/HashMap.h
@@ -1,746 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2007, 2008, 2011 Apple Inc. All rights reserved. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_HashMap_h -#define WTF_HashMap_h +#include "platform/wtf/HashMap.h" -#include "wtf/HashTable.h" -#include "wtf/allocator/PartitionAllocator.h" -#include <initializer_list> - -namespace WTF { - -template <typename KeyTraits, typename MappedTraits> -struct HashMapValueTraits; - -struct KeyValuePairKeyExtractor { - STATIC_ONLY(KeyValuePairKeyExtractor); - template <typename T> - static const typename T::KeyType& extract(const T& p) { - return p.key; - } -}; - -// Note: empty or deleted key values are not allowed, using them may lead to -// undefined behavior. For pointer keys this means that null pointers are not -// allowed unless you supply custom key traits. 
-template <typename KeyArg, - typename MappedArg, - typename HashArg = typename DefaultHash<KeyArg>::Hash, - typename KeyTraitsArg = HashTraits<KeyArg>, - typename MappedTraitsArg = HashTraits<MappedArg>, - typename Allocator = PartitionAllocator> -class HashMap { - USE_ALLOCATOR(HashMap, Allocator); - - private: - typedef KeyTraitsArg KeyTraits; - typedef MappedTraitsArg MappedTraits; - typedef HashMapValueTraits<KeyTraits, MappedTraits> ValueTraits; - - public: - typedef typename KeyTraits::TraitType KeyType; - typedef const typename KeyTraits::PeekInType& KeyPeekInType; - typedef typename MappedTraits::TraitType MappedType; - typedef typename ValueTraits::TraitType ValueType; - using value_type = ValueType; - - private: - typedef typename MappedTraits::PeekOutType MappedPeekType; - - typedef HashArg HashFunctions; - - typedef HashTable<KeyType, - ValueType, - KeyValuePairKeyExtractor, - HashFunctions, - ValueTraits, - KeyTraits, - Allocator> - HashTableType; - - class HashMapKeysProxy; - class HashMapValuesProxy; - - public: - HashMap() { - static_assert(Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<KeyArg>::value, - "Cannot put raw pointers to garbage-collected classes into " - "an off-heap HashMap. Use HeapHashMap<> instead."); - static_assert(Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<MappedArg>::value, - "Cannot put raw pointers to garbage-collected classes into " - "an off-heap HashMap. Use HeapHashMap<> instead."); - } - HashMap(const HashMap&) = default; - HashMap& operator=(const HashMap&) = default; - HashMap(HashMap&&) = default; - HashMap& operator=(HashMap&&) = default; - - // For example, HashMap<int, int>({{1, 11}, {2, 22}, {3, 33}}) will give you - // a HashMap containing a mapping {1 -> 11, 2 -> 22, 3 -> 33}. 
- HashMap(std::initializer_list<ValueType> elements); - HashMap& operator=(std::initializer_list<ValueType> elements); - - typedef HashTableIteratorAdapter<HashTableType, ValueType> iterator; - typedef HashTableConstIteratorAdapter<HashTableType, ValueType> - const_iterator; - typedef typename HashTableType::AddResult AddResult; - - void swap(HashMap& ref) { m_impl.swap(ref.m_impl); } - - unsigned size() const; - unsigned capacity() const; - void reserveCapacityForSize(unsigned size) { - m_impl.reserveCapacityForSize(size); - } - - bool isEmpty() const; - - // iterators iterate over pairs of keys and values - iterator begin(); - iterator end(); - const_iterator begin() const; - const_iterator end() const; - - HashMapKeysProxy& keys() { return static_cast<HashMapKeysProxy&>(*this); } - const HashMapKeysProxy& keys() const { - return static_cast<const HashMapKeysProxy&>(*this); - } - - HashMapValuesProxy& values() { - return static_cast<HashMapValuesProxy&>(*this); - } - const HashMapValuesProxy& values() const { - return static_cast<const HashMapValuesProxy&>(*this); - } - - iterator find(KeyPeekInType); - const_iterator find(KeyPeekInType) const; - bool contains(KeyPeekInType) const; - MappedPeekType at(KeyPeekInType) const; - - // replaces value but not key if key is already present return value is a - // pair of the iterator to the key location, and a boolean that's true if a - // new value was actually added - template <typename IncomingKeyType, typename IncomingMappedType> - AddResult set(IncomingKeyType&&, IncomingMappedType&&); - - // does nothing if key is already present return value is a pair of the - // iterator to the key location, and a boolean that's true if a new value - // was actually added - template <typename IncomingKeyType, typename IncomingMappedType> - AddResult insert(IncomingKeyType&&, IncomingMappedType&&); - - void erase(KeyPeekInType); - void erase(iterator); - void clear(); - template <typename Collection> - void removeAll(const 
Collection& toBeRemoved) { - WTF::removeAll(*this, toBeRemoved); - } - - MappedType take(KeyPeekInType); // efficient combination of get with remove - - // An alternate version of find() that finds the object by hashing and - // comparing with some other type, to avoid the cost of type - // conversion. HashTranslator must have the following function members: - // static unsigned hash(const T&); - // static bool equal(const ValueType&, const T&); - template <typename HashTranslator, typename T> - iterator find(const T&); - template <typename HashTranslator, typename T> - const_iterator find(const T&) const; - template <typename HashTranslator, typename T> - bool contains(const T&) const; - - // An alternate version of insert() that finds the object by hashing and - // comparing with some other type, to avoid the cost of type conversion if - // the object is already in the table. HashTranslator must have the - // following function members: - // static unsigned hash(const T&); - // static bool equal(const ValueType&, const T&); - // static translate(ValueType&, const T&, unsigned hashCode); - template <typename HashTranslator, - typename IncomingKeyType, - typename IncomingMappedType> - AddResult insert(IncomingKeyType&&, IncomingMappedType&&); - - static bool isValidKey(KeyPeekInType); - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - m_impl.trace(visitor); - } - - private: - template <typename IncomingKeyType, typename IncomingMappedType> - AddResult inlineAdd(IncomingKeyType&&, IncomingMappedType&&); - - HashTableType m_impl; -}; - -template <typename KeyArg, - typename MappedArg, - typename HashArg, - typename KeyTraitsArg, - typename MappedTraitsArg, - typename Allocator> -class HashMap<KeyArg, - MappedArg, - HashArg, - KeyTraitsArg, - MappedTraitsArg, - Allocator>::HashMapKeysProxy : private HashMap<KeyArg, - MappedArg, - HashArg, - KeyTraitsArg, - MappedTraitsArg, - Allocator> { - DISALLOW_NEW(); - - public: - typedef 
HashMap<KeyArg, - MappedArg, - HashArg, - KeyTraitsArg, - MappedTraitsArg, - Allocator> - HashMapType; - typedef typename HashMapType::iterator::KeysIterator iterator; - typedef typename HashMapType::const_iterator::KeysIterator const_iterator; - - iterator begin() { return HashMapType::begin().keys(); } - - iterator end() { return HashMapType::end().keys(); } - - const_iterator begin() const { return HashMapType::begin().keys(); } - - const_iterator end() const { return HashMapType::end().keys(); } - - private: - friend class HashMap; - - // These are intentionally not implemented. - HashMapKeysProxy(); - HashMapKeysProxy(const HashMapKeysProxy&); - HashMapKeysProxy& operator=(const HashMapKeysProxy&); - ~HashMapKeysProxy(); -}; - -template <typename KeyArg, - typename MappedArg, - typename HashArg, - typename KeyTraitsArg, - typename MappedTraitsArg, - typename Allocator> -class HashMap<KeyArg, - MappedArg, - HashArg, - KeyTraitsArg, - MappedTraitsArg, - Allocator>::HashMapValuesProxy : private HashMap<KeyArg, - MappedArg, - HashArg, - KeyTraitsArg, - MappedTraitsArg, - Allocator> { - DISALLOW_NEW(); - - public: - typedef HashMap<KeyArg, - MappedArg, - HashArg, - KeyTraitsArg, - MappedTraitsArg, - Allocator> - HashMapType; - typedef typename HashMapType::iterator::ValuesIterator iterator; - typedef typename HashMapType::const_iterator::ValuesIterator const_iterator; - - iterator begin() { return HashMapType::begin().values(); } - - iterator end() { return HashMapType::end().values(); } - - const_iterator begin() const { return HashMapType::begin().values(); } - - const_iterator end() const { return HashMapType::end().values(); } - - private: - friend class HashMap; - - // These are intentionally not implemented. 
- HashMapValuesProxy(); - HashMapValuesProxy(const HashMapValuesProxy&); - HashMapValuesProxy& operator=(const HashMapValuesProxy&); - ~HashMapValuesProxy(); -}; - -template <typename KeyTraits, typename MappedTraits> -struct HashMapValueTraits : KeyValuePairHashTraits<KeyTraits, MappedTraits> { - STATIC_ONLY(HashMapValueTraits); - static const bool hasIsEmptyValueFunction = true; - static bool isEmptyValue( - const typename KeyValuePairHashTraits<KeyTraits, MappedTraits>::TraitType& - value) { - return isHashTraitsEmptyValue<KeyTraits>(value.key); - } -}; - -template <typename ValueTraits, typename HashFunctions> -struct HashMapTranslator { - STATIC_ONLY(HashMapTranslator); - template <typename T> - static unsigned hash(const T& key) { - return HashFunctions::hash(key); - } - template <typename T, typename U> - static bool equal(const T& a, const U& b) { - return HashFunctions::equal(a, b); - } - template <typename T, typename U, typename V> - static void translate(T& location, U&& key, V&& mapped) { - location.key = std::forward<U>(key); - ValueTraits::ValueTraits::store(std::forward<V>(mapped), location.value); - } -}; - -template <typename ValueTraits, typename Translator> -struct HashMapTranslatorAdapter { - STATIC_ONLY(HashMapTranslatorAdapter); - template <typename T> - static unsigned hash(const T& key) { - return Translator::hash(key); - } - template <typename T, typename U> - static bool equal(const T& a, const U& b) { - return Translator::equal(a, b); - } - template <typename T, typename U, typename V> - static void translate(T& location, U&& key, V&& mapped, unsigned hashCode) { - Translator::translate(location.key, std::forward<U>(key), hashCode); - ValueTraits::ValueTraits::store(std::forward<V>(mapped), location.value); - } -}; - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -HashMap<T, U, V, W, X, Y>::HashMap(std::initializer_list<ValueType> elements) { - if (elements.size()) - 
m_impl.reserveCapacityForSize(elements.size()); - for (const ValueType& element : elements) - insert(element.key, element.value); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -auto HashMap<T, U, V, W, X, Y>::operator=( - std::initializer_list<ValueType> elements) -> HashMap& { - *this = HashMap(std::move(elements)); - return *this; -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline unsigned HashMap<T, U, V, W, X, Y>::size() const { - return m_impl.size(); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline unsigned HashMap<T, U, V, W, X, Y>::capacity() const { - return m_impl.capacity(); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline bool HashMap<T, U, V, W, X, Y>::isEmpty() const { - return m_impl.isEmpty(); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline typename HashMap<T, U, V, W, X, Y>::iterator -HashMap<T, U, V, W, X, Y>::begin() { - return m_impl.begin(); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline typename HashMap<T, U, V, W, X, Y>::iterator -HashMap<T, U, V, W, X, Y>::end() { - return m_impl.end(); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline typename HashMap<T, U, V, W, X, Y>::const_iterator -HashMap<T, U, V, W, X, Y>::begin() const { - return m_impl.begin(); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline typename HashMap<T, U, V, W, X, Y>::const_iterator -HashMap<T, U, V, W, X, Y>::end() const { - return m_impl.end(); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline typename HashMap<T, U, V, W, X, Y>::iterator -HashMap<T, U, V, W, X, 
Y>::find(KeyPeekInType key) { - return m_impl.find(key); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline typename HashMap<T, U, V, W, X, Y>::const_iterator -HashMap<T, U, V, W, X, Y>::find(KeyPeekInType key) const { - return m_impl.find(key); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline bool HashMap<T, U, V, W, X, Y>::contains(KeyPeekInType key) const { - return m_impl.contains(key); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -template <typename HashTranslator, typename TYPE> -inline typename HashMap<T, U, V, W, X, Y>::iterator -HashMap<T, U, V, W, X, Y>::find(const TYPE& value) { - return m_impl - .template find<HashMapTranslatorAdapter<ValueTraits, HashTranslator>>( - value); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -template <typename HashTranslator, typename TYPE> -inline typename HashMap<T, U, V, W, X, Y>::const_iterator -HashMap<T, U, V, W, X, Y>::find(const TYPE& value) const { - return m_impl - .template find<HashMapTranslatorAdapter<ValueTraits, HashTranslator>>( - value); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -template <typename HashTranslator, typename TYPE> -inline bool HashMap<T, U, V, W, X, Y>::contains(const TYPE& value) const { - return m_impl - .template contains<HashMapTranslatorAdapter<ValueTraits, HashTranslator>>( - value); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -template <typename IncomingKeyType, typename IncomingMappedType> -typename HashMap<T, U, V, W, X, Y>::AddResult -HashMap<T, U, V, W, X, Y>::inlineAdd(IncomingKeyType&& key, - IncomingMappedType&& mapped) { - return m_impl.template add<HashMapTranslator<ValueTraits, HashFunctions>>( - std::forward<IncomingKeyType>(key), - 
std::forward<IncomingMappedType>(mapped)); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -template <typename IncomingKeyType, typename IncomingMappedType> -typename HashMap<T, U, V, W, X, Y>::AddResult HashMap<T, U, V, W, X, Y>::set( - IncomingKeyType&& key, - IncomingMappedType&& mapped) { - AddResult result = inlineAdd(std::forward<IncomingKeyType>(key), - std::forward<IncomingMappedType>(mapped)); - if (!result.isNewEntry) { - // The inlineAdd call above found an existing hash table entry; we need - // to set the mapped value. - // - // It's safe to call std::forward again, because |mapped| isn't moved if - // there's an existing entry. - MappedTraits::store(std::forward<IncomingMappedType>(mapped), - result.storedValue->value); - } - return result; -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -template <typename HashTranslator, - typename IncomingKeyType, - typename IncomingMappedType> -auto HashMap<T, U, V, W, X, Y>::insert(IncomingKeyType&& key, - IncomingMappedType&& mapped) - -> AddResult { - return m_impl.template addPassingHashCode< - HashMapTranslatorAdapter<ValueTraits, HashTranslator>>( - std::forward<IncomingKeyType>(key), - std::forward<IncomingMappedType>(mapped)); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -template <typename IncomingKeyType, typename IncomingMappedType> -typename HashMap<T, U, V, W, X, Y>::AddResult HashMap<T, U, V, W, X, Y>::insert( - IncomingKeyType&& key, - IncomingMappedType&& mapped) { - return inlineAdd(std::forward<IncomingKeyType>(key), - std::forward<IncomingMappedType>(mapped)); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -typename HashMap<T, U, V, W, X, Y>::MappedPeekType -HashMap<T, U, V, W, X, Y>::at(KeyPeekInType key) const { - ValueType* entry = const_cast<HashTableType&>(m_impl).lookup(key); - if (!entry) 
- return MappedTraits::peek(MappedTraits::emptyValue()); - return MappedTraits::peek(entry->value); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline void HashMap<T, U, V, W, X, Y>::erase(iterator it) { - m_impl.remove(it.m_impl); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline void HashMap<T, U, V, W, X, Y>::erase(KeyPeekInType key) { - erase(find(key)); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline void HashMap<T, U, V, W, X, Y>::clear() { - m_impl.clear(); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -auto HashMap<T, U, V, W, X, Y>::take(KeyPeekInType key) -> MappedType { - iterator it = find(key); - if (it == end()) - return MappedTraits::emptyValue(); - MappedType result = std::move(it->value); - erase(it); - return result; -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -inline bool HashMap<T, U, V, W, X, Y>::isValidKey(KeyPeekInType key) { - if (KeyTraits::isDeletedValue(key)) - return false; - - if (HashFunctions::safeToCompareToEmptyOrDeleted) { - if (key == KeyTraits::emptyValue()) - return false; - } else { - if (isHashTraitsEmptyValue<KeyTraits>(key)) - return false; - } - - return true; -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y> -bool operator==(const HashMap<T, U, V, W, X, Y>& a, - const HashMap<T, U, V, W, X, Y>& b) { - if (a.size() != b.size()) - return false; - - typedef typename HashMap<T, U, V, W, X, Y>::const_iterator const_iterator; - - const_iterator aEnd = a.end(); - const_iterator bEnd = b.end(); - for (const_iterator it = a.begin(); it != aEnd; ++it) { - const_iterator bPos = b.find(it->key); - if (bPos == bEnd || it->value != bPos->value) - return false; - } - - return true; -} - -template <typename T, - typename 
U, - typename V, - typename W, - typename X, - typename Y> -inline bool operator!=(const HashMap<T, U, V, W, X, Y>& a, - const HashMap<T, U, V, W, X, Y>& b) { - return !(a == b); -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y, - typename Z> -inline void copyKeysToVector(const HashMap<T, U, V, W, X, Y>& collection, - Z& vector) { - typedef - typename HashMap<T, U, V, W, X, Y>::const_iterator::KeysIterator iterator; - - vector.resize(collection.size()); - - iterator it = collection.begin().keys(); - iterator end = collection.end().keys(); - for (unsigned i = 0; it != end; ++it, ++i) - vector[i] = *it; -} - -template <typename T, - typename U, - typename V, - typename W, - typename X, - typename Y, - typename Z> -inline void copyValuesToVector(const HashMap<T, U, V, W, X, Y>& collection, - Z& vector) { - typedef typename HashMap<T, U, V, W, X, Y>::const_iterator::ValuesIterator - iterator; - - vector.resize(collection.size()); - - iterator it = collection.begin().values(); - iterator end = collection.end().values(); - for (unsigned i = 0; it != end; ++it, ++i) - vector[i] = *it; -} - -} // namespace WTF - -using WTF::HashMap; - -#endif // WTF_HashMap_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/HashSet.h b/third_party/WebKit/Source/wtf/HashSet.h index 2f3644d3..15b1e39b 100644 --- a/third_party/WebKit/Source/wtf/HashSet.h +++ b/third_party/WebKit/Source/wtf/HashSet.h
@@ -1,330 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2007, 2008, 2011 Apple Inc. All rights reserved. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_HashSet_h -#define WTF_HashSet_h +#include "platform/wtf/HashSet.h" -#include "wtf/HashTable.h" -#include "wtf/allocator/PartitionAllocator.h" -#include <initializer_list> - -namespace WTF { - -struct IdentityExtractor; - -// Note: empty or deleted values are not allowed, using them may lead to -// undefined behavior. For pointer valuess this means that null pointers are -// not allowed unless you supply custom traits. 
-template <typename ValueArg, - typename HashArg = typename DefaultHash<ValueArg>::Hash, - typename TraitsArg = HashTraits<ValueArg>, - typename Allocator = PartitionAllocator> -class HashSet { - USE_ALLOCATOR(HashSet, Allocator); - - private: - typedef HashArg HashFunctions; - typedef TraitsArg ValueTraits; - typedef typename ValueTraits::PeekInType ValuePeekInType; - - public: - typedef typename ValueTraits::TraitType ValueType; - using value_type = ValueType; - - private: - typedef HashTable<ValueType, - ValueType, - IdentityExtractor, - HashFunctions, - ValueTraits, - ValueTraits, - Allocator> - HashTableType; - - public: - typedef HashTableConstIteratorAdapter<HashTableType, ValueTraits> iterator; - typedef HashTableConstIteratorAdapter<HashTableType, ValueTraits> - const_iterator; - typedef typename HashTableType::AddResult AddResult; - - HashSet() { - static_assert(Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<ValueArg>::value, - "Cannot put raw pointers to garbage-collected classes into " - "an off-heap HashSet. Use HeapHashSet<Member<T>> instead."); - } - HashSet(const HashSet&) = default; - HashSet& operator=(const HashSet&) = default; - HashSet(HashSet&&) = default; - HashSet& operator=(HashSet&&) = default; - - HashSet(std::initializer_list<ValueType> elements); - HashSet& operator=(std::initializer_list<ValueType> elements); - - void swap(HashSet& ref) { m_impl.swap(ref.m_impl); } - - unsigned size() const; - unsigned capacity() const; - bool isEmpty() const; - - void reserveCapacityForSize(unsigned size) { - m_impl.reserveCapacityForSize(size); - } - - iterator begin() const; - iterator end() const; - - iterator find(ValuePeekInType) const; - bool contains(ValuePeekInType) const; - - // An alternate version of find() that finds the object by hashing and - // comparing with some other type, to avoid the cost of type - // conversion. 
HashTranslator must have the following function members: - // static unsigned hash(const T&); - // static bool equal(const ValueType&, const T&); - template <typename HashTranslator, typename T> - iterator find(const T&) const; - template <typename HashTranslator, typename T> - bool contains(const T&) const; - - // The return value is a pair of an iterator to the new value's location, - // and a bool that is true if an new entry was added. - template <typename IncomingValueType> - AddResult insert(IncomingValueType&&); - - // An alternate version of add() that finds the object by hashing and - // comparing with some other type, to avoid the cost of type conversion if - // the object is already in the table. HashTranslator must have the - // following function members: - // static unsigned hash(const T&); - // static bool equal(const ValueType&, const T&); - // static translate(ValueType&, T&&, unsigned hashCode); - template <typename HashTranslator, typename T> - AddResult addWithTranslator(T&&); - - void erase(ValuePeekInType); - void erase(iterator); - void clear(); - template <typename Collection> - void removeAll(const Collection& toBeRemoved) { - WTF::removeAll(*this, toBeRemoved); - } - - ValueType take(iterator); - ValueType take(ValuePeekInType); - ValueType takeAny(); - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - m_impl.trace(visitor); - } - - private: - HashTableType m_impl; -}; - -struct IdentityExtractor { - STATIC_ONLY(IdentityExtractor); - template <typename T> - static const T& extract(const T& t) { - return t; - } -}; - -template <typename Translator> -struct HashSetTranslatorAdapter { - STATIC_ONLY(HashSetTranslatorAdapter); - template <typename T> - static unsigned hash(const T& key) { - return Translator::hash(key); - } - template <typename T, typename U> - static bool equal(const T& a, const U& b) { - return Translator::equal(a, b); - } - template <typename T, typename U, typename V> - static void 
translate(T& location, U&& key, const V&, unsigned hashCode) { - Translator::translate(location, std::forward<U>(key), hashCode); - } -}; - -template <typename Value, - typename HashFunctions, - typename Traits, - typename Allocator> -HashSet<Value, HashFunctions, Traits, Allocator>::HashSet( - std::initializer_list<ValueType> elements) { - if (elements.size()) - m_impl.reserveCapacityForSize(elements.size()); - for (const ValueType& element : elements) - insert(element); -} - -template <typename Value, - typename HashFunctions, - typename Traits, - typename Allocator> -auto HashSet<Value, HashFunctions, Traits, Allocator>::operator=( - std::initializer_list<ValueType> elements) -> HashSet& { - *this = HashSet(std::move(elements)); - return *this; -} - -template <typename T, typename U, typename V, typename W> -inline unsigned HashSet<T, U, V, W>::size() const { - return m_impl.size(); -} - -template <typename T, typename U, typename V, typename W> -inline unsigned HashSet<T, U, V, W>::capacity() const { - return m_impl.capacity(); -} - -template <typename T, typename U, typename V, typename W> -inline bool HashSet<T, U, V, W>::isEmpty() const { - return m_impl.isEmpty(); -} - -template <typename T, typename U, typename V, typename W> -inline typename HashSet<T, U, V, W>::iterator HashSet<T, U, V, W>::begin() - const { - return m_impl.begin(); -} - -template <typename T, typename U, typename V, typename W> -inline typename HashSet<T, U, V, W>::iterator HashSet<T, U, V, W>::end() const { - return m_impl.end(); -} - -template <typename T, typename U, typename V, typename W> -inline typename HashSet<T, U, V, W>::iterator HashSet<T, U, V, W>::find( - ValuePeekInType value) const { - return m_impl.find(value); -} - -template <typename Value, - typename HashFunctions, - typename Traits, - typename Allocator> -inline bool HashSet<Value, HashFunctions, Traits, Allocator>::contains( - ValuePeekInType value) const { - return m_impl.contains(value); -} - -template <typename 
Value, - typename HashFunctions, - typename Traits, - typename Allocator> -template <typename HashTranslator, typename T> -typename HashSet<Value, HashFunctions, Traits, Allocator>:: - iterator inline HashSet<Value, HashFunctions, Traits, Allocator>::find( - const T& value) const { - return m_impl.template find<HashSetTranslatorAdapter<HashTranslator>>(value); -} - -template <typename Value, - typename HashFunctions, - typename Traits, - typename Allocator> -template <typename HashTranslator, typename T> -inline bool HashSet<Value, HashFunctions, Traits, Allocator>::contains( - const T& value) const { - return m_impl.template contains<HashSetTranslatorAdapter<HashTranslator>>( - value); -} - -template <typename T, typename U, typename V, typename W> -template <typename IncomingValueType> -inline typename HashSet<T, U, V, W>::AddResult HashSet<T, U, V, W>::insert( - IncomingValueType&& value) { - return m_impl.add(std::forward<IncomingValueType>(value)); -} - -template <typename Value, - typename HashFunctions, - typename Traits, - typename Allocator> -template <typename HashTranslator, typename T> -inline typename HashSet<Value, HashFunctions, Traits, Allocator>::AddResult -HashSet<Value, HashFunctions, Traits, Allocator>::addWithTranslator(T&& value) { - // Forward only the first argument, because the second argument isn't actually - // used in HashSetTranslatorAdapter. 
- return m_impl - .template addPassingHashCode<HashSetTranslatorAdapter<HashTranslator>>( - std::forward<T>(value), value); -} - -template <typename T, typename U, typename V, typename W> -inline void HashSet<T, U, V, W>::erase(iterator it) { - m_impl.remove(it.m_impl); -} - -template <typename T, typename U, typename V, typename W> -inline void HashSet<T, U, V, W>::erase(ValuePeekInType value) { - erase(find(value)); -} - -template <typename T, typename U, typename V, typename W> -inline void HashSet<T, U, V, W>::clear() { - m_impl.clear(); -} - -template <typename T, typename U, typename V, typename W> -inline auto HashSet<T, U, V, W>::take(iterator it) -> ValueType { - if (it == end()) - return ValueTraits::emptyValue(); - - ValueType result = std::move(const_cast<ValueType&>(*it)); - erase(it); - - return result; -} - -template <typename T, typename U, typename V, typename W> -inline auto HashSet<T, U, V, W>::take(ValuePeekInType value) -> ValueType { - return take(find(value)); -} - -template <typename T, typename U, typename V, typename W> -inline auto HashSet<T, U, V, W>::takeAny() -> ValueType { - return take(begin()); -} - -template <typename C, typename W> -inline void copyToVector(const C& collection, W& vector) { - typedef typename C::const_iterator iterator; - - { - // Disallow GC across resize allocation, see crbug.com/568173 - typename W::GCForbiddenScope scope; - vector.resize(collection.size()); - } - - iterator it = collection.begin(); - iterator end = collection.end(); - for (unsigned i = 0; it != end; ++it, ++i) - vector[i] = *it; -} - -} // namespace WTF - -using WTF::HashSet; - -#endif // WTF_HashSet_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/HashTable.h b/third_party/WebKit/Source/wtf/HashTable.h index a02aea98..758d4f7 100644 --- a/third_party/WebKit/Source/wtf/HashTable.h +++ b/third_party/WebKit/Source/wtf/HashTable.h
@@ -1,2280 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights - * reserved. - * Copyright (C) 2008 David Levin <levin@chromium.org> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library - * General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#ifndef WTF_HashTable_h -#define WTF_HashTable_h +#include "platform/wtf/HashTable.h" -#include "wtf/Alignment.h" -#include "wtf/Allocator.h" -#include "wtf/Assertions.h" -#include "wtf/ConditionalDestructor.h" -#include "wtf/HashTraits.h" -#include "wtf/PtrUtil.h" -#include "wtf/allocator/PartitionAllocator.h" -#include <memory> - -#define DUMP_HASHTABLE_STATS 0 -#define DUMP_HASHTABLE_STATS_PER_TABLE 0 - -#if DUMP_HASHTABLE_STATS -#include "wtf/Atomics.h" -#include "wtf/Threading.h" -#endif - -#if DUMP_HASHTABLE_STATS_PER_TABLE -#include "wtf/DataLog.h" -#include <type_traits> -#endif - -#if DUMP_HASHTABLE_STATS -#if DUMP_HASHTABLE_STATS_PER_TABLE - -#define UPDATE_PROBE_COUNTS() \ - ++probeCount; \ - HashTableStats::instance().recordCollisionAtCount(probeCount); \ - ++perTableProbeCount; \ - m_stats->recordCollisionAtCount(perTableProbeCount) -#define UPDATE_ACCESS_COUNTS() \ - atomicIncrement(&HashTableStats::instance().numAccesses); \ - int probeCount = 0; \ - ++m_stats->numAccesses; \ - int perTableProbeCount = 0 -#else -#define UPDATE_PROBE_COUNTS() \ - ++probeCount; \ - HashTableStats::instance().recordCollisionAtCount(probeCount) -#define UPDATE_ACCESS_COUNTS() \ - atomicIncrement(&HashTableStats::instance().numAccesses); \ - int probeCount = 0 -#endif -#else -#if DUMP_HASHTABLE_STATS_PER_TABLE -#define UPDATE_PROBE_COUNTS() \ - ++perTableProbeCount; \ - m_stats->recordCollisionAtCount(perTableProbeCount) -#define UPDATE_ACCESS_COUNTS() \ - ++m_stats->numAccesses; \ - int perTableProbeCount = 0 -#else -#define UPDATE_PROBE_COUNTS() \ - do { \ - } while (0) -#define UPDATE_ACCESS_COUNTS() \ - do { \ - } while (0) -#endif -#endif - -namespace WTF { - -// This is for tracing inside collections that have special support for weak -// pointers. The trait has a trace method which returns true if there are weak -// pointers to things that have not (yet) been marked live. 
Returning true -// indicates that the entry in the collection may yet be removed by weak -// handling. Default implementation for non-weak types is to use the regular -// non-weak TraceTrait. Default implementation for types with weakness is to -// call traceInCollection on the type's trait. -template <WeakHandlingFlag weakHandlingFlag, - ShouldWeakPointersBeMarkedStrongly strongify, - typename T, - typename Traits> -struct TraceInCollectionTrait; - -#if DUMP_HASHTABLE_STATS -struct WTF_EXPORT HashTableStats { - HashTableStats() - : numAccesses(0), - numRehashes(0), - numRemoves(0), - numReinserts(0), - maxCollisions(0), - numCollisions(0), - collisionGraph() {} - - // The following variables are all atomically incremented when modified. - int numAccesses; - int numRehashes; - int numRemoves; - int numReinserts; - - // The following variables are only modified in the recordCollisionAtCount - // method within a mutex. - int maxCollisions; - int numCollisions; - int collisionGraph[4096]; - - void copy(const HashTableStats* other); - void recordCollisionAtCount(int count); - void dumpStats(); - - static HashTableStats& instance(); - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher) {} -}; - -#if DUMP_HASHTABLE_STATS_PER_TABLE -template <typename Allocator, bool isGCType = Allocator::isGarbageCollected> -class HashTableStatsPtr; - -template <typename Allocator> -class HashTableStatsPtr<Allocator, false> final { - STATIC_ONLY(HashTableStatsPtr); - - public: - static std::unique_ptr<HashTableStats> create() { - return WTF::wrapUnique(new HashTableStats); - } - - static std::unique_ptr<HashTableStats> copy( - const std::unique_ptr<HashTableStats>& other) { - if (!other) - return nullptr; - return WTF::wrapUnique(new HashTableStats(*other)); - } - - static void swap(std::unique_ptr<HashTableStats>& stats, - std::unique_ptr<HashTableStats>& other) { - stats.swap(other); - } -}; - -template <typename Allocator> -class HashTableStatsPtr<Allocator, true> 
final { - STATIC_ONLY(HashTableStatsPtr); - - public: - static HashTableStats* create() { - // Resort to manually allocating this POD on the vector - // backing heap, as blink::GarbageCollected<> isn't in scope - // in WTF. - void* storage = reinterpret_cast<void*>( - Allocator::template allocateVectorBacking<unsigned char>( - sizeof(HashTableStats))); - return new (storage) HashTableStats; - } - - static HashTableStats* copy(const HashTableStats* other) { - if (!other) - return nullptr; - HashTableStats* obj = create(); - obj->copy(other); - return obj; - } - - static void swap(HashTableStats*& stats, HashTableStats*& other) { - std::swap(stats, other); - } -}; -#endif -#endif - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -class HashTable; -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -class HashTableIterator; -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -class HashTableConstIterator; -template <typename Value, - typename HashFunctions, - typename HashTraits, - typename Allocator> -class LinkedHashSet; -template <WeakHandlingFlag x, - typename T, - typename U, - typename V, - typename W, - typename X, - typename Y, - typename Z> -struct WeakProcessingHashTableHelper; - -typedef enum { HashItemKnownGood } HashItemKnownGoodTag; - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -class HashTableConstIterator final { - DISALLOW_NEW(); - - private: - typedef HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> - HashTableType; - typedef HashTableIterator<Key, - Value, - Extractor, - HashFunctions, - Traits, - 
KeyTraits, - Allocator> - iterator; - typedef HashTableConstIterator<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> - const_iterator; - typedef Value ValueType; - using value_type = ValueType; - typedef typename Traits::IteratorConstGetType GetType; - typedef const ValueType* PointerType; - - friend class HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>; - friend class HashTableIterator<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>; - - void skipEmptyBuckets() { - while (m_position != m_endPosition && - HashTableType::isEmptyOrDeletedBucket(*m_position)) - ++m_position; - } - - HashTableConstIterator(PointerType position, - PointerType endPosition, - const HashTableType* container) - : m_position(position), - m_endPosition(endPosition) -#if DCHECK_IS_ON() - , - m_container(container), - m_containerModifications(container->modifications()) -#endif - { - skipEmptyBuckets(); - } - - HashTableConstIterator(PointerType position, - PointerType endPosition, - const HashTableType* container, - HashItemKnownGoodTag) - : m_position(position), - m_endPosition(endPosition) -#if DCHECK_IS_ON() - , - m_container(container), - m_containerModifications(container->modifications()) -#endif - { -#if DCHECK_IS_ON() - DCHECK_EQ(m_containerModifications, m_container->modifications()); -#endif - } - - void checkModifications() const { -#if DCHECK_IS_ON() - // HashTable and collections that build on it do not support - // modifications while there is an iterator in use. The exception is - // ListHashSet, which has its own iterators that tolerate modification - // of the underlying set. 
- DCHECK_EQ(m_containerModifications, m_container->modifications()); - DCHECK(!m_container->accessForbidden()); -#endif - } - - public: - HashTableConstIterator() {} - - GetType get() const { - checkModifications(); - return m_position; - } - typename Traits::IteratorConstReferenceType operator*() const { - return Traits::getToReferenceConstConversion(get()); - } - GetType operator->() const { return get(); } - - const_iterator& operator++() { - DCHECK_NE(m_position, m_endPosition); - checkModifications(); - ++m_position; - skipEmptyBuckets(); - return *this; - } - - // postfix ++ intentionally omitted - - // Comparison. - bool operator==(const const_iterator& other) const { - return m_position == other.m_position; - } - bool operator!=(const const_iterator& other) const { - return m_position != other.m_position; - } - bool operator==(const iterator& other) const { - return *this == static_cast<const_iterator>(other); - } - bool operator!=(const iterator& other) const { - return *this != static_cast<const_iterator>(other); - } - - std::ostream& printTo(std::ostream& stream) const { - if (m_position == m_endPosition) - return stream << "iterator representing <end>"; - // TODO(tkent): Change |m_position| to |*m_position| to show the - // pointed object. It requires a lot of new stream printer functions. 
- return stream << "iterator pointing to " << m_position; - } - - private: - PointerType m_position; - PointerType m_endPosition; -#if DCHECK_IS_ON() - const HashTableType* m_container; - int64_t m_containerModifications; -#endif -}; - -template <typename Key, - typename Value, - typename Extractor, - typename Hash, - typename Traits, - typename KeyTraits, - typename Allocator> -std::ostream& operator<<(std::ostream& stream, - const HashTableConstIterator<Key, - Value, - Extractor, - Hash, - Traits, - KeyTraits, - Allocator>& iterator) { - return iterator.printTo(stream); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -class HashTableIterator final { - DISALLOW_NEW(); - - private: - typedef HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> - HashTableType; - typedef HashTableIterator<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> - iterator; - typedef HashTableConstIterator<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> - const_iterator; - typedef Value ValueType; - typedef typename Traits::IteratorGetType GetType; - typedef ValueType* PointerType; - - friend class HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>; - - HashTableIterator(PointerType pos, - PointerType end, - const HashTableType* container) - : m_iterator(pos, end, container) {} - HashTableIterator(PointerType pos, - PointerType end, - const HashTableType* container, - HashItemKnownGoodTag tag) - : m_iterator(pos, end, container, tag) {} - - public: - HashTableIterator() {} - - // default copy, assignment and destructor are OK - - GetType get() const { return const_cast<GetType>(m_iterator.get()); } - typename Traits::IteratorReferenceType operator*() const { - return Traits::getToReferenceConversion(get()); - } - GetType operator->() const { 
return get(); } - - iterator& operator++() { - ++m_iterator; - return *this; - } - - // postfix ++ intentionally omitted - - // Comparison. - bool operator==(const iterator& other) const { - return m_iterator == other.m_iterator; - } - bool operator!=(const iterator& other) const { - return m_iterator != other.m_iterator; - } - bool operator==(const const_iterator& other) const { - return m_iterator == other; - } - bool operator!=(const const_iterator& other) const { - return m_iterator != other; - } - - operator const_iterator() const { return m_iterator; } - std::ostream& printTo(std::ostream& stream) const { - return m_iterator.printTo(stream); - } - - private: - const_iterator m_iterator; -}; - -template <typename Key, - typename Value, - typename Extractor, - typename Hash, - typename Traits, - typename KeyTraits, - typename Allocator> -std::ostream& operator<<(std::ostream& stream, - const HashTableIterator<Key, - Value, - Extractor, - Hash, - Traits, - KeyTraits, - Allocator>& iterator) { - return iterator.printTo(stream); -} - -using std::swap; - -template <typename T, typename Allocator, bool enterGCForbiddenScope> -struct Mover { - STATIC_ONLY(Mover); - static void move(T&& from, T& to) { - to.~T(); - new (NotNull, &to) T(std::move(from)); - } -}; - -template <typename T, typename Allocator> -struct Mover<T, Allocator, true> { - STATIC_ONLY(Mover); - static void move(T&& from, T& to) { - to.~T(); - Allocator::enterGCForbiddenScope(); - new (NotNull, &to) T(std::move(from)); - Allocator::leaveGCForbiddenScope(); - } -}; - -template <typename HashFunctions> -class IdentityHashTranslator { - STATIC_ONLY(IdentityHashTranslator); - - public: - template <typename T> - static unsigned hash(const T& key) { - return HashFunctions::hash(key); - } - template <typename T, typename U> - static bool equal(const T& a, const U& b) { - return HashFunctions::equal(a, b); - } - template <typename T, typename U, typename V> - static void translate(T& location, U&&, V&& 
value) { - location = std::forward<V>(value); - } -}; - -template <typename HashTableType, typename ValueType> -struct HashTableAddResult final { - STACK_ALLOCATED(); - HashTableAddResult(const HashTableType* container, - ValueType* storedValue, - bool isNewEntry) - : storedValue(storedValue), - isNewEntry(isNewEntry) -#if ENABLE(SECURITY_ASSERT) - , - m_container(container), - m_containerModifications(container->modifications()) -#endif - { - ALLOW_UNUSED_LOCAL(container); - DCHECK(container); - } - - ValueType* storedValue; - bool isNewEntry; - -#if ENABLE(SECURITY_ASSERT) - ~HashTableAddResult() { - // If rehash happened before accessing storedValue, it's - // use-after-free. Any modification may cause a rehash, so we check for - // modifications here. - - // Rehash after accessing storedValue is harmless but will assert if the - // AddResult destructor takes place after a modification. You may need - // to limit the scope of the AddResult. - SECURITY_DCHECK(m_containerModifications == m_container->modifications()); - } - - private: - const HashTableType* m_container; - const int64_t m_containerModifications; -#endif -}; - -template <typename Value, typename Extractor, typename KeyTraits> -struct HashTableHelper { - STATIC_ONLY(HashTableHelper); - static bool isEmptyBucket(const Value& value) { - return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); - } - static bool isDeletedBucket(const Value& value) { - return KeyTraits::isDeletedValue(Extractor::extract(value)); - } - static bool isEmptyOrDeletedBucket(const Value& value) { - return isEmptyBucket(value) || isDeletedBucket(value); - } -}; - -template <typename HashTranslator, - typename KeyTraits, - bool safeToCompareToEmptyOrDeleted> -struct HashTableKeyChecker { - STATIC_ONLY(HashTableKeyChecker); - // There's no simple generic way to make this check if - // safeToCompareToEmptyOrDeleted is false, so the check always passes. 
- template <typename T> - static bool checkKey(const T&) { - return true; - } -}; - -template <typename HashTranslator, typename KeyTraits> -struct HashTableKeyChecker<HashTranslator, KeyTraits, true> { - STATIC_ONLY(HashTableKeyChecker); - template <typename T> - static bool checkKey(const T& key) { - // FIXME : Check also equality to the deleted value. - return !HashTranslator::equal(KeyTraits::emptyValue(), key); - } -}; - -// Note: empty or deleted key values are not allowed, using them may lead to -// undefined behavior. For pointer keys this means that null pointers are not -// allowed unless you supply custom key traits. -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -class HashTable final - : public ConditionalDestructor<HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>, - Allocator::isGarbageCollected> { - DISALLOW_NEW(); - - public: - typedef HashTableIterator<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> - iterator; - typedef HashTableConstIterator<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> - const_iterator; - typedef Traits ValueTraits; - typedef Key KeyType; - typedef typename KeyTraits::PeekInType KeyPeekInType; - typedef Value ValueType; - typedef Extractor ExtractorType; - typedef KeyTraits KeyTraitsType; - typedef IdentityHashTranslator<HashFunctions> IdentityTranslatorType; - typedef HashTableAddResult<HashTable, ValueType> AddResult; - - HashTable(); - void finalize() { - DCHECK(!Allocator::isGarbageCollected); - if (LIKELY(!m_table)) - return; - enterAccessForbiddenScope(); - deleteAllBucketsAndDeallocate(m_table, m_tableSize); - leaveAccessForbiddenScope(); - m_table = nullptr; - } - - HashTable(const HashTable&); - HashTable(HashTable&&); - void swap(HashTable&); - HashTable& operator=(const HashTable&); - HashTable& 
operator=(HashTable&&); - - // When the hash table is empty, just return the same iterator for end as - // for begin. This is more efficient because we don't have to skip all the - // empty and deleted buckets, and iterating an empty table is a common case - // that's worth optimizing. - iterator begin() { return isEmpty() ? end() : makeIterator(m_table); } - iterator end() { return makeKnownGoodIterator(m_table + m_tableSize); } - const_iterator begin() const { - return isEmpty() ? end() : makeConstIterator(m_table); - } - const_iterator end() const { - return makeKnownGoodConstIterator(m_table + m_tableSize); - } - - unsigned size() const { - DCHECK(!accessForbidden()); - return m_keyCount; - } - unsigned capacity() const { - DCHECK(!accessForbidden()); - return m_tableSize; - } - bool isEmpty() const { - DCHECK(!accessForbidden()); - return !m_keyCount; - } - - void reserveCapacityForSize(unsigned size); - - template <typename IncomingValueType> - AddResult add(IncomingValueType&& value) { - return add<IdentityTranslatorType>(Extractor::extract(value), - std::forward<IncomingValueType>(value)); - } - - // A special version of add() that finds the object by hashing and comparing - // with some other type, to avoid the cost of type conversion if the object - // is already in the table. 
- template <typename HashTranslator, typename T, typename Extra> - AddResult add(T&& key, Extra&&); - template <typename HashTranslator, typename T, typename Extra> - AddResult addPassingHashCode(T&& key, Extra&&); - - iterator find(KeyPeekInType key) { return find<IdentityTranslatorType>(key); } - const_iterator find(KeyPeekInType key) const { - return find<IdentityTranslatorType>(key); - } - bool contains(KeyPeekInType key) const { - return contains<IdentityTranslatorType>(key); - } - - template <typename HashTranslator, typename T> - iterator find(const T&); - template <typename HashTranslator, typename T> - const_iterator find(const T&) const; - template <typename HashTranslator, typename T> - bool contains(const T&) const; - - void remove(KeyPeekInType); - void remove(iterator); - void remove(const_iterator); - void clear(); - - static bool isEmptyBucket(const ValueType& value) { - return isHashTraitsEmptyValue<KeyTraits>(Extractor::extract(value)); - } - static bool isDeletedBucket(const ValueType& value) { - return KeyTraits::isDeletedValue(Extractor::extract(value)); - } - static bool isEmptyOrDeletedBucket(const ValueType& value) { - return HashTableHelper<ValueType, Extractor, - KeyTraits>::isEmptyOrDeletedBucket(value); - } - - ValueType* lookup(KeyPeekInType key) { - return lookup<IdentityTranslatorType, KeyPeekInType>(key); - } - template <typename HashTranslator, typename T> - ValueType* lookup(const T&); - template <typename HashTranslator, typename T> - const ValueType* lookup(const T&) const; - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher); - -#if DCHECK_IS_ON() - void enterAccessForbiddenScope() { - DCHECK(!m_accessForbidden); - m_accessForbidden = true; - } - void leaveAccessForbiddenScope() { m_accessForbidden = false; } - bool accessForbidden() const { return m_accessForbidden; } - int64_t modifications() const { return m_modifications; } - void registerModification() { m_modifications++; } - // HashTable and 
collections that build on it do not support modifications - // while there is an iterator in use. The exception is ListHashSet, which - // has its own iterators that tolerate modification of the underlying set. - void checkModifications(int64_t mods) const { - DCHECK_EQ(mods, m_modifications); - } -#else - ALWAYS_INLINE void enterAccessForbiddenScope() {} - ALWAYS_INLINE void leaveAccessForbiddenScope() {} - ALWAYS_INLINE bool accessForbidden() const { return false; } - ALWAYS_INLINE int64_t modifications() const { return 0; } - ALWAYS_INLINE void registerModification() {} - ALWAYS_INLINE void checkModifications(int64_t mods) const {} -#endif - - private: - static ValueType* allocateTable(unsigned size); - static void deleteAllBucketsAndDeallocate(ValueType* table, unsigned size); - - typedef std::pair<ValueType*, bool> LookupType; - typedef std::pair<LookupType, unsigned> FullLookupType; - - LookupType lookupForWriting(const Key& key) { - return lookupForWriting<IdentityTranslatorType>(key); - } - template <typename HashTranslator, typename T> - FullLookupType fullLookupForWriting(const T&); - template <typename HashTranslator, typename T> - LookupType lookupForWriting(const T&); - - void remove(ValueType*); - - bool shouldExpand() const { - return (m_keyCount + m_deletedCount) * m_maxLoad >= m_tableSize; - } - bool mustRehashInPlace() const { - return m_keyCount * m_minLoad < m_tableSize * 2; - } - bool shouldShrink() const { - // isAllocationAllowed check should be at the last because it's - // expensive. 
- return m_keyCount * m_minLoad < m_tableSize && - m_tableSize > KeyTraits::minimumTableSize && - Allocator::isAllocationAllowed(); - } - ValueType* expand(ValueType* entry = 0); - void shrink() { rehash(m_tableSize / 2, 0); } - - ValueType* expandBuffer(unsigned newTableSize, ValueType* entry, bool&); - ValueType* rehashTo(ValueType* newTable, - unsigned newTableSize, - ValueType* entry); - ValueType* rehash(unsigned newTableSize, ValueType* entry); - ValueType* reinsert(ValueType&&); - - static void initializeBucket(ValueType& bucket); - static void deleteBucket(ValueType& bucket) { - bucket.~ValueType(); - Traits::constructDeletedValue(bucket, Allocator::isGarbageCollected); - } - - FullLookupType makeLookupResult(ValueType* position, - bool found, - unsigned hash) { - return FullLookupType(LookupType(position, found), hash); - } - - iterator makeIterator(ValueType* pos) { - return iterator(pos, m_table + m_tableSize, this); - } - const_iterator makeConstIterator(ValueType* pos) const { - return const_iterator(pos, m_table + m_tableSize, this); - } - iterator makeKnownGoodIterator(ValueType* pos) { - return iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); - } - const_iterator makeKnownGoodConstIterator(ValueType* pos) const { - return const_iterator(pos, m_table + m_tableSize, this, HashItemKnownGood); - } - - static const unsigned m_maxLoad = 2; - static const unsigned m_minLoad = 6; - - unsigned tableSizeMask() const { - size_t mask = m_tableSize - 1; - DCHECK_EQ((mask & m_tableSize), 0u); - return mask; - } - - void setEnqueued() { m_queueFlag = true; } - void clearEnqueued() { m_queueFlag = false; } - bool enqueued() { return m_queueFlag; } - - ValueType* m_table; - unsigned m_tableSize; - unsigned m_keyCount; -#if DCHECK_IS_ON() - unsigned m_deletedCount : 30; - unsigned m_queueFlag : 1; - unsigned m_accessForbidden : 1; - unsigned m_modifications; -#else - unsigned m_deletedCount : 31; - unsigned m_queueFlag : 1; -#endif - -#if 
DUMP_HASHTABLE_STATS_PER_TABLE - public: - mutable - typename std::conditional<Allocator::isGarbageCollected, - HashTableStats*, - std::unique_ptr<HashTableStats>>::type m_stats; -#endif - - template <WeakHandlingFlag x, - typename T, - typename U, - typename V, - typename W, - typename X, - typename Y, - typename Z> - friend struct WeakProcessingHashTableHelper; - template <typename T, typename U, typename V, typename W> - friend class LinkedHashSet; -}; - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -inline HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::HashTable() - : m_table(nullptr), - m_tableSize(0), - m_keyCount(0), - m_deletedCount(0), - m_queueFlag(false) -#if DCHECK_IS_ON() - , - m_accessForbidden(false), - m_modifications(0) -#endif -#if DUMP_HASHTABLE_STATS_PER_TABLE - , - m_stats(nullptr) -#endif -{ - static_assert(Allocator::isGarbageCollected || - (!IsPointerToGarbageCollectedType<Key>::value && - !IsPointerToGarbageCollectedType<Value>::value), - "Cannot put raw pointers to garbage-collected classes into an " - "off-heap collection."); -} - -inline unsigned doubleHash(unsigned key) { - key = ~key + (key >> 23); - key ^= (key << 12); - key ^= (key >> 7); - key ^= (key << 2); - key ^= (key >> 20); - return key; -} - -inline unsigned calculateCapacity(unsigned size) { - for (unsigned mask = size; mask; mask >>= 1) - size |= mask; // 00110101010 -> 00111111111 - return (size + 1) * 2; // 00111111111 -> 10000000000 -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -void HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::reserveCapacityForSize(unsigned newSize) { - unsigned newCapacity = calculateCapacity(newSize); - if (newCapacity < 
KeyTraits::minimumTableSize) - newCapacity = KeyTraits::minimumTableSize; - - if (newCapacity > capacity()) { - RELEASE_ASSERT(!static_cast<int>( - newCapacity >> - 31)); // HashTable capacity should not overflow 32bit int. - rehash(newCapacity, 0); - } -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T> -inline Value* -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - lookup(const T& key) { - return const_cast<Value*>( - const_cast<const HashTable*>(this)->lookup<HashTranslator>(key)); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T> -inline const Value* -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - lookup(const T& key) const { - DCHECK(!accessForbidden()); - DCHECK((HashTableKeyChecker< - HashTranslator, KeyTraits, - HashFunctions::safeToCompareToEmptyOrDeleted>::checkKey(key))); - const ValueType* table = m_table; - if (!table) - return nullptr; - - size_t k = 0; - size_t sizeMask = tableSizeMask(); - unsigned h = HashTranslator::hash(key); - size_t i = h & sizeMask; - - UPDATE_ACCESS_COUNTS(); - - while (1) { - const ValueType* entry = table + i; - - if (HashFunctions::safeToCompareToEmptyOrDeleted) { - if (HashTranslator::equal(Extractor::extract(*entry), key)) - return entry; - - if (isEmptyBucket(*entry)) - return nullptr; - } else { - if (isEmptyBucket(*entry)) - return nullptr; - - if (!isDeletedBucket(*entry) && - HashTranslator::equal(Extractor::extract(*entry), key)) - return entry; - } - UPDATE_PROBE_COUNTS(); - if (!k) - k = 1 | doubleHash(h); - i = (i + k) & sizeMask; - } -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - 
typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T> -inline typename HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::LookupType -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - lookupForWriting(const T& key) { - DCHECK(!accessForbidden()); - DCHECK(m_table); - registerModification(); - - ValueType* table = m_table; - size_t k = 0; - size_t sizeMask = tableSizeMask(); - unsigned h = HashTranslator::hash(key); - size_t i = h & sizeMask; - - UPDATE_ACCESS_COUNTS(); - - ValueType* deletedEntry = nullptr; - - while (1) { - ValueType* entry = table + i; - - if (isEmptyBucket(*entry)) - return LookupType(deletedEntry ? deletedEntry : entry, false); - - if (HashFunctions::safeToCompareToEmptyOrDeleted) { - if (HashTranslator::equal(Extractor::extract(*entry), key)) - return LookupType(entry, true); - - if (isDeletedBucket(*entry)) - deletedEntry = entry; - } else { - if (isDeletedBucket(*entry)) - deletedEntry = entry; - else if (HashTranslator::equal(Extractor::extract(*entry), key)) - return LookupType(entry, true); - } - UPDATE_PROBE_COUNTS(); - if (!k) - k = 1 | doubleHash(h); - i = (i + k) & sizeMask; - } -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T> -inline typename HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::FullLookupType -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - fullLookupForWriting(const T& key) { - DCHECK(!accessForbidden()); - DCHECK(m_table); - registerModification(); - - ValueType* table = m_table; - size_t k = 0; - size_t sizeMask = tableSizeMask(); - unsigned h = HashTranslator::hash(key); - size_t i = h & sizeMask; - - UPDATE_ACCESS_COUNTS(); - - ValueType* deletedEntry = nullptr; 
- - while (1) { - ValueType* entry = table + i; - - if (isEmptyBucket(*entry)) - return makeLookupResult(deletedEntry ? deletedEntry : entry, false, h); - - if (HashFunctions::safeToCompareToEmptyOrDeleted) { - if (HashTranslator::equal(Extractor::extract(*entry), key)) - return makeLookupResult(entry, true, h); - - if (isDeletedBucket(*entry)) - deletedEntry = entry; - } else { - if (isDeletedBucket(*entry)) - deletedEntry = entry; - else if (HashTranslator::equal(Extractor::extract(*entry), key)) - return makeLookupResult(entry, true, h); - } - UPDATE_PROBE_COUNTS(); - if (!k) - k = 1 | doubleHash(h); - i = (i + k) & sizeMask; - } -} - -template <bool emptyValueIsZero> -struct HashTableBucketInitializer; - -template <> -struct HashTableBucketInitializer<false> { - STATIC_ONLY(HashTableBucketInitializer); - template <typename Traits, typename Value> - static void initialize(Value& bucket) { - new (NotNull, &bucket) Value(Traits::emptyValue()); - } -}; - -template <> -struct HashTableBucketInitializer<true> { - STATIC_ONLY(HashTableBucketInitializer); - template <typename Traits, typename Value> - static void initialize(Value& bucket) { - // This initializes the bucket without copying the empty value. That - // makes it possible to use this with types that don't support copying. - // The memset to 0 looks like a slow operation but is optimized by the - // compilers. 
- memset(&bucket, 0, sizeof(bucket)); - } -}; - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -inline void -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - initializeBucket(ValueType& bucket) { - HashTableBucketInitializer<Traits::emptyValueIsZero>::template initialize< - Traits>(bucket); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T, typename Extra> -typename HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::AddResult -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - add(T&& key, Extra&& extra) { - DCHECK(!accessForbidden()); - DCHECK(Allocator::isAllocationAllowed()); - if (!m_table) - expand(); - - DCHECK(m_table); - - ValueType* table = m_table; - size_t k = 0; - size_t sizeMask = tableSizeMask(); - unsigned h = HashTranslator::hash(key); - size_t i = h & sizeMask; - - UPDATE_ACCESS_COUNTS(); - - ValueType* deletedEntry = nullptr; - ValueType* entry; - while (1) { - entry = table + i; - - if (isEmptyBucket(*entry)) - break; - - if (HashFunctions::safeToCompareToEmptyOrDeleted) { - if (HashTranslator::equal(Extractor::extract(*entry), key)) - return AddResult(this, entry, false); - - if (isDeletedBucket(*entry)) - deletedEntry = entry; - } else { - if (isDeletedBucket(*entry)) - deletedEntry = entry; - else if (HashTranslator::equal(Extractor::extract(*entry), key)) - return AddResult(this, entry, false); - } - UPDATE_PROBE_COUNTS(); - if (!k) - k = 1 | doubleHash(h); - i = (i + k) & sizeMask; - } - - registerModification(); - - if (deletedEntry) { - // Overwrite any data left over from last use, using placement new or - // memset. 
- initializeBucket(*deletedEntry); - entry = deletedEntry; - --m_deletedCount; - } - - HashTranslator::translate(*entry, std::forward<T>(key), - std::forward<Extra>(extra)); - DCHECK(!isEmptyOrDeletedBucket(*entry)); - - ++m_keyCount; - - if (shouldExpand()) { - entry = expand(entry); - } else if (Traits::weakHandlingFlag == WeakHandlingInCollections && - shouldShrink()) { - // When weak hash tables are processed by the garbage collector, - // elements with no other strong references to them will have their - // table entries cleared. But no shrinking of the backing store is - // allowed at that time, as allocations are prohibited during that - // GC phase. - // - // With that weak processing taking care of removals, explicit - // remove()s of elements is rarely done. Which implies that the - // weak hash table will never be checked if it can be shrunk. - // - // To prevent weak hash tables with very low load factors from - // developing, we perform it when adding elements instead. - entry = rehash(m_tableSize / 2, entry); - } - - return AddResult(this, entry, true); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T, typename Extra> -typename HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::AddResult -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - addPassingHashCode(T&& key, Extra&& extra) { - DCHECK(!accessForbidden()); - DCHECK(Allocator::isAllocationAllowed()); - if (!m_table) - expand(); - - FullLookupType lookupResult = fullLookupForWriting<HashTranslator>(key); - - ValueType* entry = lookupResult.first.first; - bool found = lookupResult.first.second; - unsigned h = lookupResult.second; - - if (found) - return AddResult(this, entry, false); - - registerModification(); - - if (isDeletedBucket(*entry)) { - initializeBucket(*entry); - 
--m_deletedCount; - } - - HashTranslator::translate(*entry, std::forward<T>(key), - std::forward<Extra>(extra), h); - DCHECK(!isEmptyOrDeletedBucket(*entry)); - - ++m_keyCount; - if (shouldExpand()) - entry = expand(entry); - - return AddResult(this, entry, true); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -Value* -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - reinsert(ValueType&& entry) { - DCHECK(m_table); - registerModification(); - DCHECK(!lookupForWriting(Extractor::extract(entry)).second); - DCHECK( - !isDeletedBucket(*(lookupForWriting(Extractor::extract(entry)).first))); -#if DUMP_HASHTABLE_STATS - atomicIncrement(&HashTableStats::instance().numReinserts); -#endif -#if DUMP_HASHTABLE_STATS_PER_TABLE - ++m_stats->numReinserts; -#endif - Value* newEntry = lookupForWriting(Extractor::extract(entry)).first; - Mover<ValueType, Allocator, - Traits::template NeedsToForbidGCOnMove<>::value>::move(std::move(entry), - *newEntry); - - return newEntry; -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T> -inline typename HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::iterator -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - find(const T& key) { - ValueType* entry = lookup<HashTranslator>(key); - if (!entry) - return end(); - - return makeKnownGoodIterator(entry); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T> -inline typename HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::const_iterator 
-HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - find(const T& key) const { - ValueType* entry = const_cast<HashTable*>(this)->lookup<HashTranslator>(key); - if (!entry) - return end(); - - return makeKnownGoodConstIterator(entry); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename HashTranslator, typename T> -bool HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::contains(const T& key) const { - return const_cast<HashTable*>(this)->lookup<HashTranslator>(key); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -void HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::remove(ValueType* pos) { - registerModification(); -#if DUMP_HASHTABLE_STATS - atomicIncrement(&HashTableStats::instance().numRemoves); -#endif -#if DUMP_HASHTABLE_STATS_PER_TABLE - ++m_stats->numRemoves; -#endif - - enterAccessForbiddenScope(); - deleteBucket(*pos); - leaveAccessForbiddenScope(); - ++m_deletedCount; - --m_keyCount; - - if (shouldShrink()) - shrink(); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -inline void -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - remove(iterator it) { - if (it == end()) - return; - remove(const_cast<ValueType*>(it.m_iterator.m_position)); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -inline void -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - remove(const_iterator it) { - if (it == end()) - return; - 
remove(const_cast<ValueType*>(it.m_position)); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -inline void -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - remove(KeyPeekInType key) { - remove(find(key)); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -Value* -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - allocateTable(unsigned size) { - size_t allocSize = size * sizeof(ValueType); - ValueType* result; - // Assert that we will not use memset on things with a vtable entry. The - // compiler will also check this on some platforms. We would like to check - // this on the whole value (key-value pair), but std::is_polymorphic will - // return false for a pair of two types, even if one of the components is - // polymorphic. 
- static_assert( - !Traits::emptyValueIsZero || !std::is_polymorphic<KeyType>::value, - "empty value cannot be zero for things with a vtable"); - static_assert(Allocator::isGarbageCollected || - ((!AllowsOnlyPlacementNew<KeyType>::value || - !IsTraceable<KeyType>::value) && - (!AllowsOnlyPlacementNew<ValueType>::value || - !IsTraceable<ValueType>::value)), - "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that " - "have trace methods into an off-heap HashTable"); - - if (Traits::emptyValueIsZero) { - result = Allocator::template allocateZeroedHashTableBacking<ValueType, - HashTable>( - allocSize); - } else { - result = Allocator::template allocateHashTableBacking<ValueType, HashTable>( - allocSize); - for (unsigned i = 0; i < size; i++) - initializeBucket(result[i]); - } - return result; -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -void HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::deleteAllBucketsAndDeallocate(ValueType* table, - unsigned size) { - if (!IsTriviallyDestructible<ValueType>::value) { - for (unsigned i = 0; i < size; ++i) { - // This code is called when the hash table is cleared or resized. We - // have allocated a new backing store and we need to run the - // destructors on the old backing store, as it is being freed. If we - // are GCing we need to both call the destructor and mark the bucket - // as deleted, otherwise the destructor gets called again when the - // GC finds the backing store. With the default allocator it's - // enough to call the destructor, since we will free the memory - // explicitly and we won't see the memory with the bucket again. 
- if (Allocator::isGarbageCollected) { - if (!isEmptyOrDeletedBucket(table[i])) - deleteBucket(table[i]); - } else { - if (!isDeletedBucket(table[i])) - table[i].~ValueType(); - } - } - } - Allocator::freeHashTableBacking(table); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -Value* -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - expand(Value* entry) { - unsigned newSize; - if (!m_tableSize) { - newSize = KeyTraits::minimumTableSize; - } else if (mustRehashInPlace()) { - newSize = m_tableSize; - } else { - newSize = m_tableSize * 2; - RELEASE_ASSERT(newSize > m_tableSize); - } - - return rehash(newSize, entry); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -Value* -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - expandBuffer(unsigned newTableSize, Value* entry, bool& success) { - success = false; - DCHECK_LT(m_tableSize, newTableSize); - if (!Allocator::expandHashTableBacking(m_table, - newTableSize * sizeof(ValueType))) - return nullptr; - - success = true; - - Value* newEntry = nullptr; - unsigned oldTableSize = m_tableSize; - ValueType* originalTable = m_table; - - ValueType* temporaryTable = allocateTable(oldTableSize); - for (unsigned i = 0; i < oldTableSize; i++) { - if (&m_table[i] == entry) - newEntry = &temporaryTable[i]; - if (isEmptyOrDeletedBucket(m_table[i])) { - DCHECK_NE(&m_table[i], entry); - if (Traits::emptyValueIsZero) { - memset(&temporaryTable[i], 0, sizeof(ValueType)); - } else { - initializeBucket(temporaryTable[i]); - } - } else { - Mover<ValueType, Allocator, - Traits::template NeedsToForbidGCOnMove<>::value>:: - move(std::move(m_table[i]), temporaryTable[i]); - } - } - m_table = temporaryTable; - - if (Traits::emptyValueIsZero) { - 
memset(originalTable, 0, newTableSize * sizeof(ValueType)); - } else { - for (unsigned i = 0; i < newTableSize; i++) - initializeBucket(originalTable[i]); - } - newEntry = rehashTo(originalTable, newTableSize, newEntry); - - enterAccessForbiddenScope(); - deleteAllBucketsAndDeallocate(temporaryTable, oldTableSize); - leaveAccessForbiddenScope(); - - return newEntry; -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -Value* -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - rehashTo(ValueType* newTable, unsigned newTableSize, Value* entry) { - unsigned oldTableSize = m_tableSize; - ValueType* oldTable = m_table; - -#if DUMP_HASHTABLE_STATS - if (oldTableSize != 0) - atomicIncrement(&HashTableStats::instance().numRehashes); -#endif - -#if DUMP_HASHTABLE_STATS_PER_TABLE - if (oldTableSize != 0) - ++m_stats->numRehashes; -#endif - - m_table = newTable; - m_tableSize = newTableSize; - - Value* newEntry = nullptr; - for (unsigned i = 0; i != oldTableSize; ++i) { - if (isEmptyOrDeletedBucket(oldTable[i])) { - DCHECK_NE(&oldTable[i], entry); - continue; - } - Value* reinsertedEntry = reinsert(std::move(oldTable[i])); - if (&oldTable[i] == entry) { - DCHECK(!newEntry); - newEntry = reinsertedEntry; - } - } - - m_deletedCount = 0; - -#if DUMP_HASHTABLE_STATS_PER_TABLE - if (!m_stats) - m_stats = HashTableStatsPtr<Allocator>::create(); -#endif - - return newEntry; -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -Value* -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - rehash(unsigned newTableSize, Value* entry) { - unsigned oldTableSize = m_tableSize; - ValueType* oldTable = m_table; - -#if DUMP_HASHTABLE_STATS - if (oldTableSize != 0) - atomicIncrement(&HashTableStats::instance().numRehashes); 
-#endif - -#if DUMP_HASHTABLE_STATS_PER_TABLE - if (oldTableSize != 0) - ++m_stats->numRehashes; -#endif - - // The Allocator::isGarbageCollected check is not needed. The check is just - // a static hint for a compiler to indicate that Base::expandBuffer returns - // false if Allocator is a PartitionAllocator. - if (Allocator::isGarbageCollected && newTableSize > oldTableSize) { - bool success; - Value* newEntry = expandBuffer(newTableSize, entry, success); - if (success) - return newEntry; - } - - ValueType* newTable = allocateTable(newTableSize); - Value* newEntry = rehashTo(newTable, newTableSize, entry); - - enterAccessForbiddenScope(); - deleteAllBucketsAndDeallocate(oldTable, oldTableSize); - leaveAccessForbiddenScope(); - - return newEntry; -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -void HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::clear() { - registerModification(); - if (!m_table) - return; - - enterAccessForbiddenScope(); - deleteAllBucketsAndDeallocate(m_table, m_tableSize); - leaveAccessForbiddenScope(); - m_table = nullptr; - m_tableSize = 0; - m_keyCount = 0; -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - HashTable(const HashTable& other) - : m_table(nullptr), - m_tableSize(0), - m_keyCount(0), - m_deletedCount(0), - m_queueFlag(false) -#if DCHECK_IS_ON() - , - m_accessForbidden(false), - m_modifications(0) -#endif -#if DUMP_HASHTABLE_STATS_PER_TABLE - , - m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats)) -#endif -{ - if (other.size()) - reserveCapacityForSize(other.size()); - // Copy the hash table the dumb way, by adding each element to the new - // table. 
It might be more efficient to copy the table slots, but it's not - // clear that efficiency is needed. - for (const auto& element : other) - add(element); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: - HashTable(HashTable&& other) - : m_table(nullptr), - m_tableSize(0), - m_keyCount(0), - m_deletedCount(0), - m_queueFlag(false) -#if DCHECK_IS_ON() - , - m_accessForbidden(false), - m_modifications(0) -#endif -#if DUMP_HASHTABLE_STATS_PER_TABLE - , - m_stats(HashTableStatsPtr<Allocator>::copy(other.m_stats)) -#endif -{ - swap(other); -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -void HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::swap(HashTable& other) { - DCHECK(!accessForbidden()); - std::swap(m_table, other.m_table); - std::swap(m_tableSize, other.m_tableSize); - std::swap(m_keyCount, other.m_keyCount); - // std::swap does not work for bit fields. 
- unsigned deleted = m_deletedCount; - m_deletedCount = other.m_deletedCount; - other.m_deletedCount = deleted; - DCHECK(!m_queueFlag); - DCHECK(!other.m_queueFlag); - -#if DCHECK_IS_ON() - std::swap(m_modifications, other.m_modifications); -#endif - -#if DUMP_HASHTABLE_STATS_PER_TABLE - HashTableStatsPtr<Allocator>::swap(m_stats, other.m_stats); -#endif -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: -operator=(const HashTable& other) { - HashTable tmp(other); - swap(tmp); - return *this; -} - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>& -HashTable<Key, Value, Extractor, HashFunctions, Traits, KeyTraits, Allocator>:: -operator=(HashTable&& other) { - swap(other); - return *this; -} - -template <WeakHandlingFlag weakHandlingFlag, - typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -struct WeakProcessingHashTableHelper; - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -struct WeakProcessingHashTableHelper<NoWeakHandlingInCollections, - Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> { - STATIC_ONLY(WeakProcessingHashTableHelper); - static void process(typename Allocator::Visitor* visitor, void* closure) {} - static void ephemeronIteration(typename Allocator::Visitor* visitor, - void* closure) {} - static void ephemeronIterationDone(typename Allocator::Visitor* visitor, - void* closure) 
{} -}; - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -struct WeakProcessingHashTableHelper<WeakHandlingInCollections, - Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator> { - STATIC_ONLY(WeakProcessingHashTableHelper); - - using HashTableType = HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>; - using ValueType = typename HashTableType::ValueType; - - // Used for purely weak and for weak-and-strong tables (ephemerons). - static void process(typename Allocator::Visitor* visitor, void* closure) { - HashTableType* table = reinterpret_cast<HashTableType*>(closure); - if (!table->m_table) - return; - // Now perform weak processing (this is a no-op if the backing was - // accessible through an iterator and was already marked strongly). - for (ValueType* element = table->m_table + table->m_tableSize - 1; - element >= table->m_table; element--) { - if (!HashTableType::isEmptyOrDeletedBucket(*element)) { - // At this stage calling trace can make no difference - // (everything is already traced), but we use the return value - // to remove things from the collection. - - // FIXME: This should be rewritten so that this can check if the - // element is dead without calling trace, which is semantically - // not correct to be called in weak processing stage. - if (TraceInCollectionTrait<WeakHandlingInCollections, - WeakPointersActWeak, ValueType, - Traits>::trace(visitor, *element)) { - table->registerModification(); - HashTableType::deleteBucket(*element); // Also calls the destructor. - table->m_deletedCount++; - table->m_keyCount--; - // We don't rehash the backing until the next add or delete, - // because that would cause allocation during GC. - } - } - } - } - - // Called repeatedly for tables that have both weak and strong pointers. 
- static void ephemeronIteration(typename Allocator::Visitor* visitor, - void* closure) { - HashTableType* table = reinterpret_cast<HashTableType*>(closure); - DCHECK(table->m_table); - // Check the hash table for elements that we now know will not be - // removed by weak processing. Those elements need to have their strong - // pointers traced. - for (ValueType* element = table->m_table + table->m_tableSize - 1; - element >= table->m_table; element--) { - if (!HashTableType::isEmptyOrDeletedBucket(*element)) - TraceInCollectionTrait<WeakHandlingInCollections, WeakPointersActWeak, - ValueType, Traits>::trace(visitor, *element); - } - } - - // Called when the ephemeron iteration is done and before running the per - // thread weak processing. It is guaranteed to be called before any thread - // is resumed. - static void ephemeronIterationDone(typename Allocator::Visitor* visitor, - void* closure) { - HashTableType* table = reinterpret_cast<HashTableType*>(closure); -#if DCHECK_IS_ON() - DCHECK(Allocator::weakTableRegistered(visitor, table)); -#endif - table->clearEnqueued(); - } -}; - -template <typename Key, - typename Value, - typename Extractor, - typename HashFunctions, - typename Traits, - typename KeyTraits, - typename Allocator> -template <typename VisitorDispatcher> -void HashTable<Key, - Value, - Extractor, - HashFunctions, - Traits, - KeyTraits, - Allocator>::trace(VisitorDispatcher visitor) { -#if DUMP_HASHTABLE_STATS_PER_TABLE - Allocator::markNoTracing(visitor, m_stats); -#endif - - // If someone else already marked the backing and queued up the trace and/or - // weak callback then we are done. This optimization does not happen for - // ListHashSet since its iterator does not point at the backing. - if (!m_table || Allocator::isHeapObjectAlive(m_table)) - return; - - // Normally, we mark the backing store without performing trace. This means - // it is marked live, but the pointers inside it are not marked. Instead we - // will mark the pointers below. 
However, for backing stores that contain - // weak pointers the handling is rather different. We don't mark the - // backing store here, so the marking GC will leave the backing unmarked. If - // the backing is found in any other way than through its HashTable (ie from - // an iterator) then the mark bit will be set and the pointers will be - // marked strongly, avoiding problems with iterating over things that - // disappear due to weak processing while we are iterating over them. We - // register the backing store pointer for delayed marking which will take - // place after we know if the backing is reachable from elsewhere. We also - // register a weakProcessing callback which will perform weak processing if - // needed. - if (Traits::weakHandlingFlag == NoWeakHandlingInCollections) { - Allocator::markNoTracing(visitor, m_table); - } else { - Allocator::registerDelayedMarkNoTracing(visitor, m_table); - // Since we're delaying marking this HashTable, it is possible that the - // registerWeakMembers is called multiple times (in rare - // cases). However, it shouldn't cause any issue. - Allocator::registerWeakMembers( - visitor, this, - WeakProcessingHashTableHelper<Traits::weakHandlingFlag, Key, Value, - Extractor, HashFunctions, Traits, - KeyTraits, Allocator>::process); - } - // If the backing store will be moved by sweep compaction, register the - // table reference pointing to the backing store object, so that the - // reference is updated upon object relocation. A no-op if not enabled - // by the visitor. 
- Allocator::registerBackingStoreReference(visitor, &m_table); - if (!IsTraceableInCollectionTrait<Traits>::value) - return; - if (Traits::weakHandlingFlag == WeakHandlingInCollections) { - // If we have both strong and weak pointers in the collection then - // we queue up the collection for fixed point iteration a la - // Ephemerons: - // http://dl.acm.org/citation.cfm?doid=263698.263733 - see also - // http://www.jucs.org/jucs_14_21/eliminating_cycles_in_weak -#if DCHECK_IS_ON() - DCHECK(!enqueued() || Allocator::weakTableRegistered(visitor, this)); -#endif - if (!enqueued()) { - Allocator::registerWeakTable( - visitor, this, - WeakProcessingHashTableHelper< - Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, - Traits, KeyTraits, Allocator>::ephemeronIteration, - WeakProcessingHashTableHelper< - Traits::weakHandlingFlag, Key, Value, Extractor, HashFunctions, - Traits, KeyTraits, Allocator>::ephemeronIterationDone); - setEnqueued(); - } - // We don't need to trace the elements here, since registering as a - // weak table above will cause them to be traced (perhaps several - // times). It's better to wait until everything else is traced - // before tracing the elements for the first time; this may reduce - // (by one) the number of iterations needed to get to a fixed point. 
- return; - } - for (ValueType* element = m_table + m_tableSize - 1; element >= m_table; - element--) { - if (!isEmptyOrDeletedBucket(*element)) - Allocator::template trace<VisitorDispatcher, ValueType, Traits>(visitor, - *element); - } -} - -// iterator adapters - -template <typename HashTableType, typename Traits> -struct HashTableConstIteratorAdapter { - STACK_ALLOCATED(); - HashTableConstIteratorAdapter() {} - HashTableConstIteratorAdapter( - const typename HashTableType::const_iterator& impl) - : m_impl(impl) {} - typedef typename Traits::IteratorConstGetType GetType; - typedef - typename HashTableType::ValueTraits::IteratorConstGetType SourceGetType; - - GetType get() const { - return const_cast<GetType>(SourceGetType(m_impl.get())); - } - typename Traits::IteratorConstReferenceType operator*() const { - return Traits::getToReferenceConstConversion(get()); - } - GetType operator->() const { return get(); } - - HashTableConstIteratorAdapter& operator++() { - ++m_impl; - return *this; - } - // postfix ++ intentionally omitted - - typename HashTableType::const_iterator m_impl; -}; - -template <typename HashTable, typename Traits> -std::ostream& operator<<( - std::ostream& stream, - const HashTableConstIteratorAdapter<HashTable, Traits>& iterator) { - return stream << iterator.m_impl; -} - -template <typename HashTableType, typename Traits> -struct HashTableIteratorAdapter { - STACK_ALLOCATED(); - typedef typename Traits::IteratorGetType GetType; - typedef typename HashTableType::ValueTraits::IteratorGetType SourceGetType; - - HashTableIteratorAdapter() {} - HashTableIteratorAdapter(const typename HashTableType::iterator& impl) - : m_impl(impl) {} - - GetType get() const { - return const_cast<GetType>(SourceGetType(m_impl.get())); - } - typename Traits::IteratorReferenceType operator*() const { - return Traits::getToReferenceConversion(get()); - } - GetType operator->() const { return get(); } - - HashTableIteratorAdapter& operator++() { - ++m_impl; - return 
*this; - } - // postfix ++ intentionally omitted - - operator HashTableConstIteratorAdapter<HashTableType, Traits>() { - typename HashTableType::const_iterator i = m_impl; - return i; - } - - typename HashTableType::iterator m_impl; -}; - -template <typename HashTable, typename Traits> -std::ostream& operator<<( - std::ostream& stream, - const HashTableIteratorAdapter<HashTable, Traits>& iterator) { - return stream << iterator.m_impl; -} - -template <typename T, typename U> -inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, - const HashTableConstIteratorAdapter<T, U>& b) { - return a.m_impl == b.m_impl; -} - -template <typename T, typename U> -inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, - const HashTableConstIteratorAdapter<T, U>& b) { - return a.m_impl != b.m_impl; -} - -template <typename T, typename U> -inline bool operator==(const HashTableIteratorAdapter<T, U>& a, - const HashTableIteratorAdapter<T, U>& b) { - return a.m_impl == b.m_impl; -} - -template <typename T, typename U> -inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, - const HashTableIteratorAdapter<T, U>& b) { - return a.m_impl != b.m_impl; -} - -// All 4 combinations of ==, != and Const,non const. 
-template <typename T, typename U> -inline bool operator==(const HashTableConstIteratorAdapter<T, U>& a, - const HashTableIteratorAdapter<T, U>& b) { - return a.m_impl == b.m_impl; -} - -template <typename T, typename U> -inline bool operator!=(const HashTableConstIteratorAdapter<T, U>& a, - const HashTableIteratorAdapter<T, U>& b) { - return a.m_impl != b.m_impl; -} - -template <typename T, typename U> -inline bool operator==(const HashTableIteratorAdapter<T, U>& a, - const HashTableConstIteratorAdapter<T, U>& b) { - return a.m_impl == b.m_impl; -} - -template <typename T, typename U> -inline bool operator!=(const HashTableIteratorAdapter<T, U>& a, - const HashTableConstIteratorAdapter<T, U>& b) { - return a.m_impl != b.m_impl; -} - -template <typename Collection1, typename Collection2> -inline void removeAll(Collection1& collection, const Collection2& toBeRemoved) { - if (collection.isEmpty() || toBeRemoved.isEmpty()) - return; - typedef typename Collection2::const_iterator CollectionIterator; - CollectionIterator end(toBeRemoved.end()); - for (CollectionIterator it(toBeRemoved.begin()); it != end; ++it) - collection.erase(*it); -} - -} // namespace WTF - -#include "wtf/HashIterators.h" - -#endif // WTF_HashTable_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/HashTraits.h b/third_party/WebKit/Source/wtf/HashTraits.h index cde60cbc..42b464e 100644 --- a/third_party/WebKit/Source/wtf/HashTraits.h +++ b/third_party/WebKit/Source/wtf/HashTraits.h
@@ -1,429 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights - * reserved. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_HashTraits_h -#define WTF_HashTraits_h +#include "platform/wtf/HashTraits.h" -#include "wtf/Forward.h" -#include "wtf/HashFunctions.h" -#include "wtf/HashTableDeletedValueType.h" -#include "wtf/StdLibExtras.h" -#include "wtf/TypeTraits.h" -#include <limits> -#include <memory> -#include <string.h> // For memset. -#include <type_traits> -#include <utility> - -namespace WTF { - -template <bool isInteger, typename T> -struct GenericHashTraitsBase; -template <typename T> -struct HashTraits; - -enum ShouldWeakPointersBeMarkedStrongly { - WeakPointersActStrong, - WeakPointersActWeak -}; - -template <typename T> -struct GenericHashTraitsBase<false, T> { - // The emptyValueIsZero flag is used to optimize allocation of empty hash - // tables with zeroed memory. 
- static const bool emptyValueIsZero = false; - - // The hasIsEmptyValueFunction flag allows the hash table to automatically - // generate code to check for the empty value when it can be done with the - // equality operator, but allows custom functions for cases like String that - // need them. - static const bool hasIsEmptyValueFunction = false; - -// The starting table size. Can be overridden when we know beforehand that a -// hash table will have at least N entries. -#if defined(MEMORY_SANITIZER_INITIAL_SIZE) - static const unsigned minimumTableSize = 1; -#else - static const unsigned minimumTableSize = 8; -#endif - - // When a hash table backing store is traced, its elements will be - // traced if their class type has a trace method. However, weak-referenced - // elements should not be traced then, but handled by the weak processing - // phase that follows. - template <typename U = void> - struct IsTraceableInCollection { - static const bool value = IsTraceable<T>::value && !IsWeak<T>::value; - }; - - // The NeedsToForbidGCOnMove flag is used to make the hash table move - // operations safe when GC is enabled: if a move constructor invokes - // an allocation triggering the GC then it should be invoked within GC - // forbidden scope. - template <typename U = void> - struct NeedsToForbidGCOnMove { - // TODO(yutak): Consider using of std:::is_trivially_move_constructible - // when it is accessible. - static const bool value = !std::is_pod<T>::value; - }; - - static const WeakHandlingFlag weakHandlingFlag = - IsWeak<T>::value ? WeakHandlingInCollections - : NoWeakHandlingInCollections; -}; - -// Default integer traits disallow both 0 and -1 as keys (max value instead of -// -1 for unsigned). 
-template <typename T> -struct GenericHashTraitsBase<true, T> : GenericHashTraitsBase<false, T> { - static const bool emptyValueIsZero = true; - static void constructDeletedValue(T& slot, bool) { - slot = static_cast<T>(-1); - } - static bool isDeletedValue(T value) { return value == static_cast<T>(-1); } -}; - -template <typename T> -struct GenericHashTraits - : GenericHashTraitsBase<std::is_integral<T>::value, T> { - typedef T TraitType; - typedef T EmptyValueType; - - static T emptyValue() { return T(); } - - // Type for functions that do not take ownership, such as contains. - typedef const T& PeekInType; - typedef T* IteratorGetType; - typedef const T* IteratorConstGetType; - typedef T& IteratorReferenceType; - typedef const T& IteratorConstReferenceType; - static IteratorReferenceType getToReferenceConversion(IteratorGetType x) { - return *x; - } - static IteratorConstReferenceType getToReferenceConstConversion( - IteratorConstGetType x) { - return *x; - } - - template <typename IncomingValueType> - static void store(IncomingValueType&& value, T& storage) { - storage = std::forward<IncomingValueType>(value); - } - - // Type for return value of functions that do not transfer ownership, such - // as get. - // FIXME: We could change this type to const T& for better performance if we - // figured out a way to handle the return value from emptyValue, which is a - // temporary. 
- typedef T PeekOutType; - static const T& peek(const T& value) { return value; } -}; - -template <typename T> -struct HashTraits : GenericHashTraits<T> {}; - -template <typename T> -struct FloatHashTraits : GenericHashTraits<T> { - static T emptyValue() { return std::numeric_limits<T>::infinity(); } - static void constructDeletedValue(T& slot, bool) { - slot = -std::numeric_limits<T>::infinity(); - } - static bool isDeletedValue(T value) { - return value == -std::numeric_limits<T>::infinity(); - } -}; - -template <> -struct HashTraits<float> : FloatHashTraits<float> {}; -template <> -struct HashTraits<double> : FloatHashTraits<double> {}; - -// Default unsigned traits disallow both 0 and max as keys -- use these traits -// to allow zero and disallow max - 1. -template <typename T> -struct UnsignedWithZeroKeyHashTraits : GenericHashTraits<T> { - static const bool emptyValueIsZero = false; - static T emptyValue() { return std::numeric_limits<T>::max(); } - static void constructDeletedValue(T& slot, bool) { - slot = std::numeric_limits<T>::max() - 1; - } - static bool isDeletedValue(T value) { - return value == std::numeric_limits<T>::max() - 1; - } -}; - -template <typename P> -struct HashTraits<P*> : GenericHashTraits<P*> { - static const bool emptyValueIsZero = true; - static void constructDeletedValue(P*& slot, bool) { - slot = reinterpret_cast<P*>(-1); - } - static bool isDeletedValue(P* value) { - return value == reinterpret_cast<P*>(-1); - } -}; - -template <typename T> -struct SimpleClassHashTraits : GenericHashTraits<T> { - static const bool emptyValueIsZero = true; - template <typename U = void> - struct NeedsToForbidGCOnMove { - static const bool value = false; - }; - static void constructDeletedValue(T& slot, bool) { - new (NotNull, &slot) T(HashTableDeletedValue); - } - static bool isDeletedValue(const T& value) { - return value.isHashTableDeletedValue(); - } -}; - -template <typename P> -struct HashTraits<RefPtr<P>> : SimpleClassHashTraits<RefPtr<P>> { 
- typedef std::nullptr_t EmptyValueType; - static EmptyValueType emptyValue() { return nullptr; } - - static const bool hasIsEmptyValueFunction = true; - static bool isEmptyValue(const RefPtr<P>& value) { return !value; } - - typedef RefPtrValuePeeker<P> PeekInType; - typedef RefPtr<P>* IteratorGetType; - typedef const RefPtr<P>* IteratorConstGetType; - typedef RefPtr<P>& IteratorReferenceType; - typedef const RefPtr<P>& IteratorConstReferenceType; - static IteratorReferenceType getToReferenceConversion(IteratorGetType x) { - return *x; - } - static IteratorConstReferenceType getToReferenceConstConversion( - IteratorConstGetType x) { - return *x; - } - - static void store(PassRefPtr<P> value, RefPtr<P>& storage) { - storage = std::move(value); - } - - typedef P* PeekOutType; - static PeekOutType peek(const RefPtr<P>& value) { return value.get(); } - static PeekOutType peek(std::nullptr_t) { return 0; } -}; - -template <typename T> -struct HashTraits<std::unique_ptr<T>> - : SimpleClassHashTraits<std::unique_ptr<T>> { - using EmptyValueType = std::nullptr_t; - static EmptyValueType emptyValue() { return nullptr; } - - static const bool hasIsEmptyValueFunction = true; - static bool isEmptyValue(const std::unique_ptr<T>& value) { return !value; } - - using PeekInType = T*; - - static void store(std::unique_ptr<T>&& value, std::unique_ptr<T>& storage) { - storage = std::move(value); - } - - using PeekOutType = T*; - static PeekOutType peek(const std::unique_ptr<T>& value) { - return value.get(); - } - static PeekOutType peek(std::nullptr_t) { return nullptr; } - - static void constructDeletedValue(std::unique_ptr<T>& slot, bool) { - // Dirty trick: implant an invalid pointer to unique_ptr. Destructor isn't - // called for deleted buckets, so this is okay. 
- new (NotNull, &slot) std::unique_ptr<T>(reinterpret_cast<T*>(1u)); - } - static bool isDeletedValue(const std::unique_ptr<T>& value) { - return value.get() == reinterpret_cast<T*>(1u); - } -}; - -template <> -struct HashTraits<String> : SimpleClassHashTraits<String> { - static const bool hasIsEmptyValueFunction = true; - static bool isEmptyValue(const String&); -}; - -// This struct template is an implementation detail of the -// isHashTraitsEmptyValue function, which selects either the emptyValue function -// or the isEmptyValue function to check for empty values. -template <typename Traits, bool hasEmptyValueFunction> -struct HashTraitsEmptyValueChecker; -template <typename Traits> -struct HashTraitsEmptyValueChecker<Traits, true> { - template <typename T> - static bool isEmptyValue(const T& value) { - return Traits::isEmptyValue(value); - } -}; -template <typename Traits> -struct HashTraitsEmptyValueChecker<Traits, false> { - template <typename T> - static bool isEmptyValue(const T& value) { - return value == Traits::emptyValue(); - } -}; -template <typename Traits, typename T> -inline bool isHashTraitsEmptyValue(const T& value) { - return HashTraitsEmptyValueChecker< - Traits, Traits::hasIsEmptyValueFunction>::isEmptyValue(value); -} - -template <typename FirstTraitsArg, typename SecondTraitsArg> -struct PairHashTraits - : GenericHashTraits<std::pair<typename FirstTraitsArg::TraitType, - typename SecondTraitsArg::TraitType>> { - typedef FirstTraitsArg FirstTraits; - typedef SecondTraitsArg SecondTraits; - typedef std::pair<typename FirstTraits::TraitType, - typename SecondTraits::TraitType> - TraitType; - typedef std::pair<typename FirstTraits::EmptyValueType, - typename SecondTraits::EmptyValueType> - EmptyValueType; - - static const bool emptyValueIsZero = - FirstTraits::emptyValueIsZero && SecondTraits::emptyValueIsZero; - static EmptyValueType emptyValue() { - return std::make_pair(FirstTraits::emptyValue(), - SecondTraits::emptyValue()); - } - - static 
const bool hasIsEmptyValueFunction = - FirstTraits::hasIsEmptyValueFunction || - SecondTraits::hasIsEmptyValueFunction; - static bool isEmptyValue(const TraitType& value) { - return isHashTraitsEmptyValue<FirstTraits>(value.first) && - isHashTraitsEmptyValue<SecondTraits>(value.second); - } - - static const unsigned minimumTableSize = FirstTraits::minimumTableSize; - - static void constructDeletedValue(TraitType& slot, bool zeroValue) { - FirstTraits::constructDeletedValue(slot.first, zeroValue); - // For GC collections the memory for the backing is zeroed when it is - // allocated, and the constructors may take advantage of that, - // especially if a GC occurs during insertion of an entry into the - // table. This slot is being marked deleted, but If the slot is reused - // at a later point, the same assumptions around memory zeroing must - // hold as they did at the initial allocation. Therefore we zero the - // value part of the slot here for GC collections. - if (zeroValue) - memset(reinterpret_cast<void*>(&slot.second), 0, sizeof(slot.second)); - } - static bool isDeletedValue(const TraitType& value) { - return FirstTraits::isDeletedValue(value.first); - } -}; - -template <typename First, typename Second> -struct HashTraits<std::pair<First, Second>> - : public PairHashTraits<HashTraits<First>, HashTraits<Second>> {}; - -template <typename KeyTypeArg, typename ValueTypeArg> -struct KeyValuePair { - typedef KeyTypeArg KeyType; - - template <typename IncomingKeyType, typename IncomingValueType> - KeyValuePair(IncomingKeyType&& key, IncomingValueType&& value) - : key(std::forward<IncomingKeyType>(key)), - value(std::forward<IncomingValueType>(value)) {} - - template <typename OtherKeyType, typename OtherValueType> - KeyValuePair(KeyValuePair<OtherKeyType, OtherValueType>&& other) - : key(std::move(other.key)), value(std::move(other.value)) {} - - KeyTypeArg key; - ValueTypeArg value; -}; - -template <typename KeyTraitsArg, typename ValueTraitsArg> -struct 
KeyValuePairHashTraits - : GenericHashTraits<KeyValuePair<typename KeyTraitsArg::TraitType, - typename ValueTraitsArg::TraitType>> { - typedef KeyTraitsArg KeyTraits; - typedef ValueTraitsArg ValueTraits; - typedef KeyValuePair<typename KeyTraits::TraitType, - typename ValueTraits::TraitType> - TraitType; - typedef KeyValuePair<typename KeyTraits::EmptyValueType, - typename ValueTraits::EmptyValueType> - EmptyValueType; - - static const bool emptyValueIsZero = - KeyTraits::emptyValueIsZero && ValueTraits::emptyValueIsZero; - static EmptyValueType emptyValue() { - return KeyValuePair<typename KeyTraits::EmptyValueType, - typename ValueTraits::EmptyValueType>( - KeyTraits::emptyValue(), ValueTraits::emptyValue()); - } - - template <typename U = void> - struct IsTraceableInCollection { - static const bool value = IsTraceableInCollectionTrait<KeyTraits>::value || - IsTraceableInCollectionTrait<ValueTraits>::value; - }; - - template <typename U = void> - struct NeedsToForbidGCOnMove { - static const bool value = - KeyTraits::template NeedsToForbidGCOnMove<>::value || - ValueTraits::template NeedsToForbidGCOnMove<>::value; - }; - - static const WeakHandlingFlag weakHandlingFlag = - (KeyTraits::weakHandlingFlag == WeakHandlingInCollections || - ValueTraits::weakHandlingFlag == WeakHandlingInCollections) - ? WeakHandlingInCollections - : NoWeakHandlingInCollections; - - static const unsigned minimumTableSize = KeyTraits::minimumTableSize; - - static void constructDeletedValue(TraitType& slot, bool zeroValue) { - KeyTraits::constructDeletedValue(slot.key, zeroValue); - // See similar code in this file for why we need to do this. 
- if (zeroValue) - memset(reinterpret_cast<void*>(&slot.value), 0, sizeof(slot.value)); - } - static bool isDeletedValue(const TraitType& value) { - return KeyTraits::isDeletedValue(value.key); - } -}; - -template <typename Key, typename Value> -struct HashTraits<KeyValuePair<Key, Value>> - : public KeyValuePairHashTraits<HashTraits<Key>, HashTraits<Value>> {}; - -template <typename T> -struct NullableHashTraits : public HashTraits<T> { - static const bool emptyValueIsZero = false; - static T emptyValue() { return reinterpret_cast<T>(1); } -}; - -} // namespace WTF - -using WTF::HashTraits; -using WTF::PairHashTraits; -using WTF::NullableHashTraits; -using WTF::SimpleClassHashTraits; - -#endif // WTF_HashTraits_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/LinkedHashSet.h b/third_party/WebKit/Source/wtf/LinkedHashSet.h index f0e58176b..534e242 100644 --- a/third_party/WebKit/Source/wtf/LinkedHashSet.h +++ b/third_party/WebKit/Source/wtf/LinkedHashSet.h
@@ -1,938 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights - * reserved. - * Copyright (C) 2011, Benjamin Poulain <ikipou@gmail.com> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_LinkedHashSet_h -#define WTF_LinkedHashSet_h +#include "platform/wtf/LinkedHashSet.h" -#include "wtf/AddressSanitizer.h" -#include "wtf/HashSet.h" -#include "wtf/allocator/PartitionAllocator.h" - -namespace WTF { - -// LinkedHashSet: Just like HashSet, this class provides a Set -// interface - a collection of unique objects with O(1) insertion, -// removal and test for containership. However, it also has an -// order - iterating it will always give back values in the order -// in which they are added. - -// Unlike ListHashSet, but like most WTF collections, iteration is NOT safe -// against mutation of the LinkedHashSet. 
- -template <typename Value, - typename HashFunctions, - typename HashTraits, - typename Allocator> -class LinkedHashSet; - -template <typename LinkedHashSet> -class LinkedHashSetIterator; -template <typename LinkedHashSet> -class LinkedHashSetConstIterator; -template <typename LinkedHashSet> -class LinkedHashSetReverseIterator; -template <typename LinkedHashSet> -class LinkedHashSetConstReverseIterator; - -template <typename Value, typename HashFunctions, typename Allocator> -struct LinkedHashSetTranslator; -template <typename Value, typename Allocator> -struct LinkedHashSetExtractor; -template <typename Value, typename ValueTraits, typename Allocator> -struct LinkedHashSetTraits; - -class LinkedHashSetNodeBase { - DISALLOW_NEW(); - - public: - LinkedHashSetNodeBase() : m_prev(this), m_next(this) {} - - NO_SANITIZE_ADDRESS - void unlink() { - if (!m_next) - return; - DCHECK(m_prev); - DCHECK(m_next->m_prev == this); - DCHECK(m_prev->m_next == this); - m_next->m_prev = m_prev; - m_prev->m_next = m_next; - } - - ~LinkedHashSetNodeBase() { unlink(); } - - void insertBefore(LinkedHashSetNodeBase& other) { - other.m_next = this; - other.m_prev = m_prev; - m_prev->m_next = &other; - m_prev = &other; - DCHECK(other.m_next); - DCHECK(other.m_prev); - DCHECK(m_next); - DCHECK(m_prev); - } - - void insertAfter(LinkedHashSetNodeBase& other) { - other.m_prev = this; - other.m_next = m_next; - m_next->m_prev = &other; - m_next = &other; - DCHECK(other.m_next); - DCHECK(other.m_prev); - DCHECK(m_next); - DCHECK(m_prev); - } - - LinkedHashSetNodeBase(LinkedHashSetNodeBase* prev, - LinkedHashSetNodeBase* next) - : m_prev(prev), m_next(next) { - DCHECK((prev && next) || (!prev && !next)); - } - - LinkedHashSetNodeBase* m_prev; - LinkedHashSetNodeBase* m_next; - - protected: - // If we take a copy of a node we can't copy the next and prev pointers, - // since they point to something that does not point at us. This is used - // inside the shouldExpand() "if" in HashTable::add. 
- LinkedHashSetNodeBase(const LinkedHashSetNodeBase& other) - : m_prev(0), m_next(0) {} - - LinkedHashSetNodeBase(LinkedHashSetNodeBase&& other) - : m_prev(other.m_prev), m_next(other.m_next) { - other.m_prev = nullptr; - other.m_next = nullptr; - if (m_next) { - m_prev->m_next = this; - m_next->m_prev = this; - } - } - - private: - // Should not be used. - LinkedHashSetNodeBase& operator=(const LinkedHashSetNodeBase& other); -}; - -template <typename ValueArg, typename Allocator> -class LinkedHashSetNode : public LinkedHashSetNodeBase { - DISALLOW_NEW_EXCEPT_PLACEMENT_NEW(); - - public: - LinkedHashSetNode(const ValueArg& value, - LinkedHashSetNodeBase* prev, - LinkedHashSetNodeBase* next) - : LinkedHashSetNodeBase(prev, next), m_value(value) {} - - LinkedHashSetNode(LinkedHashSetNode&& other) - : LinkedHashSetNodeBase(std::move(other)), - m_value(std::move(other.m_value)) {} - - ValueArg m_value; - - private: - WTF_MAKE_NONCOPYABLE(LinkedHashSetNode); -}; - -template <typename ValueArg, - typename HashFunctions = typename DefaultHash<ValueArg>::Hash, - typename TraitsArg = HashTraits<ValueArg>, - typename Allocator = PartitionAllocator> -class LinkedHashSet { - USE_ALLOCATOR(LinkedHashSet, Allocator); - - private: - typedef ValueArg Value; - typedef TraitsArg Traits; - typedef LinkedHashSetNode<Value, Allocator> Node; - typedef LinkedHashSetNodeBase NodeBase; - typedef LinkedHashSetTranslator<Value, HashFunctions, Allocator> - NodeHashFunctions; - typedef LinkedHashSetTraits<Value, Traits, Allocator> NodeHashTraits; - - typedef HashTable<Node, - Node, - IdentityExtractor, - NodeHashFunctions, - NodeHashTraits, - NodeHashTraits, - Allocator> - ImplType; - - public: - typedef LinkedHashSetIterator<LinkedHashSet> iterator; - friend class LinkedHashSetIterator<LinkedHashSet>; - typedef LinkedHashSetConstIterator<LinkedHashSet> const_iterator; - friend class LinkedHashSetConstIterator<LinkedHashSet>; - - typedef LinkedHashSetReverseIterator<LinkedHashSet> 
reverse_iterator; - friend class LinkedHashSetReverseIterator<LinkedHashSet>; - typedef LinkedHashSetConstReverseIterator<LinkedHashSet> - const_reverse_iterator; - friend class LinkedHashSetConstReverseIterator<LinkedHashSet>; - - struct AddResult final { - STACK_ALLOCATED(); - AddResult(const typename ImplType::AddResult& hashTableAddResult) - : storedValue(&hashTableAddResult.storedValue->m_value), - isNewEntry(hashTableAddResult.isNewEntry) {} - - Value* storedValue; - bool isNewEntry; - }; - - typedef typename HashTraits<Value>::PeekInType ValuePeekInType; - - LinkedHashSet(); - LinkedHashSet(const LinkedHashSet&); - LinkedHashSet(LinkedHashSet&&); - LinkedHashSet& operator=(const LinkedHashSet&); - LinkedHashSet& operator=(LinkedHashSet&&); - - // Needs finalization. The anchor needs to unlink itself from the chain. - ~LinkedHashSet(); - - static void finalize(void* pointer) { - reinterpret_cast<LinkedHashSet*>(pointer)->~LinkedHashSet(); - } - void finalizeGarbageCollectedObject() { finalize(this); } - - void swap(LinkedHashSet&); - - unsigned size() const { return m_impl.size(); } - unsigned capacity() const { return m_impl.capacity(); } - bool isEmpty() const { return m_impl.isEmpty(); } - - iterator begin() { return makeIterator(firstNode()); } - iterator end() { return makeIterator(anchor()); } - const_iterator begin() const { return makeConstIterator(firstNode()); } - const_iterator end() const { return makeConstIterator(anchor()); } - - reverse_iterator rbegin() { return makeReverseIterator(lastNode()); } - reverse_iterator rend() { return makeReverseIterator(anchor()); } - const_reverse_iterator rbegin() const { - return makeConstReverseIterator(lastNode()); - } - const_reverse_iterator rend() const { - return makeConstReverseIterator(anchor()); - } - - Value& front(); - const Value& front() const; - void removeFirst(); - - Value& back(); - const Value& back() const; - void pop_back(); - - iterator find(ValuePeekInType); - const_iterator 
find(ValuePeekInType) const; - bool contains(ValuePeekInType) const; - - // An alternate version of find() that finds the object by hashing and - // comparing with some other type, to avoid the cost of type conversion. - // The HashTranslator interface is defined in HashSet. - template <typename HashTranslator, typename T> - iterator find(const T&); - template <typename HashTranslator, typename T> - const_iterator find(const T&) const; - template <typename HashTranslator, typename T> - bool contains(const T&) const; - - // The return value of insert is a pair of a pointer to the stored value, - // and a bool that is true if an new entry was added. - template <typename IncomingValueType> - AddResult insert(IncomingValueType&&); - - // Same as insert() except that the return value is an - // iterator. Useful in cases where it's needed to have the - // same return value as find() and where it's not possible to - // use a pointer to the storedValue. - template <typename IncomingValueType> - iterator addReturnIterator(IncomingValueType&&); - - // Add the value to the end of the collection. If the value was already in - // the list, it is moved to the end. - template <typename IncomingValueType> - AddResult appendOrMoveToLast(IncomingValueType&&); - - // Add the value to the beginning of the collection. If the value was already - // in the list, it is moved to the beginning. 
- template <typename IncomingValueType> - AddResult prependOrMoveToFirst(IncomingValueType&&); - - template <typename IncomingValueType> - AddResult insertBefore(ValuePeekInType beforeValue, - IncomingValueType&& newValue); - template <typename IncomingValueType> - AddResult insertBefore(iterator it, IncomingValueType&& newValue) { - return m_impl.template add<NodeHashFunctions>( - std::forward<IncomingValueType>(newValue), it.getNode()); - } - - void erase(ValuePeekInType); - void erase(iterator); - void clear() { m_impl.clear(); } - template <typename Collection> - void removeAll(const Collection& other) { - WTF::removeAll(*this, other); - } - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - m_impl.trace(visitor); - // Should the underlying table be moved by GC, register a callback - // that fixes up the interior pointers that the (Heap)LinkedHashSet keeps. - if (m_impl.m_table) { - Allocator::registerBackingStoreCallback( - visitor, m_impl.m_table, moveBackingCallback, - reinterpret_cast<void*>(&m_anchor)); - } - } - - int64_t modifications() const { return m_impl.modifications(); } - void checkModifications(int64_t mods) const { - m_impl.checkModifications(mods); - } - - private: - Node* anchor() { return reinterpret_cast<Node*>(&m_anchor); } - const Node* anchor() const { - return reinterpret_cast<const Node*>(&m_anchor); - } - Node* firstNode() { return reinterpret_cast<Node*>(m_anchor.m_next); } - const Node* firstNode() const { - return reinterpret_cast<const Node*>(m_anchor.m_next); - } - Node* lastNode() { return reinterpret_cast<Node*>(m_anchor.m_prev); } - const Node* lastNode() const { - return reinterpret_cast<const Node*>(m_anchor.m_prev); - } - - iterator makeIterator(const Node* position) { - return iterator(position, this); - } - const_iterator makeConstIterator(const Node* position) const { - return const_iterator(position, this); - } - reverse_iterator makeReverseIterator(const Node* position) { - return 
reverse_iterator(position, this); - } - const_reverse_iterator makeConstReverseIterator(const Node* position) const { - return const_reverse_iterator(position, this); - } - - static void moveBackingCallback(void* anchor, - void* from, - void* to, - size_t size) { - // Note: the hash table move may have been overlapping; linearly scan the - // entire table and fixup interior pointers into the old region with - // correspondingly offset ones into the new. - size_t tableSize = size / sizeof(Node); - Node* table = reinterpret_cast<Node*>(to); - NodeBase* fromStart = reinterpret_cast<NodeBase*>(from); - NodeBase* fromEnd = - reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(from) + size); - for (Node* element = table + tableSize - 1; element >= table; element--) { - Node& node = *element; - if (ImplType::isEmptyOrDeletedBucket(node)) - continue; - if (node.m_next >= fromStart && node.m_next < fromEnd) { - size_t diff = reinterpret_cast<uintptr_t>(node.m_next) - - reinterpret_cast<uintptr_t>(from); - node.m_next = - reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff); - } - if (node.m_prev >= fromStart && node.m_prev < fromEnd) { - size_t diff = reinterpret_cast<uintptr_t>(node.m_prev) - - reinterpret_cast<uintptr_t>(from); - node.m_prev = - reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff); - } - } - NodeBase* anchorNode = reinterpret_cast<NodeBase*>(anchor); - if (anchorNode->m_next >= fromStart && anchorNode->m_next < fromEnd) { - size_t diff = reinterpret_cast<uintptr_t>(anchorNode->m_next) - - reinterpret_cast<uintptr_t>(from); - anchorNode->m_next = - reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff); - } - if (anchorNode->m_prev >= fromStart && anchorNode->m_prev < fromEnd) { - size_t diff = reinterpret_cast<uintptr_t>(anchorNode->m_prev) - - reinterpret_cast<uintptr_t>(from); - anchorNode->m_prev = - reinterpret_cast<NodeBase*>(reinterpret_cast<uintptr_t>(to) + diff); - } - } - - ImplType m_impl; - 
NodeBase m_anchor; -}; - -template <typename Value, typename HashFunctions, typename Allocator> -struct LinkedHashSetTranslator { - STATIC_ONLY(LinkedHashSetTranslator); - typedef LinkedHashSetNode<Value, Allocator> Node; - typedef LinkedHashSetNodeBase NodeBase; - typedef typename HashTraits<Value>::PeekInType ValuePeekInType; - static unsigned hash(const Node& node) { - return HashFunctions::hash(node.m_value); - } - static unsigned hash(const ValuePeekInType& key) { - return HashFunctions::hash(key); - } - static bool equal(const Node& a, const ValuePeekInType& b) { - return HashFunctions::equal(a.m_value, b); - } - static bool equal(const Node& a, const Node& b) { - return HashFunctions::equal(a.m_value, b.m_value); - } - template <typename IncomingValueType> - static void translate(Node& location, - IncomingValueType&& key, - NodeBase* anchor) { - anchor->insertBefore(location); - location.m_value = std::forward<IncomingValueType>(key); - } - - // Empty (or deleted) slots have the m_next pointer set to null, but we - // don't do anything to the other fields, which may contain junk. - // Therefore you can't compare a newly constructed empty value with a - // slot and get the right answer. - static const bool safeToCompareToEmptyOrDeleted = false; -}; - -template <typename Value, typename Allocator> -struct LinkedHashSetExtractor { - STATIC_ONLY(LinkedHashSetExtractor); - static const Value& extract(const LinkedHashSetNode<Value, Allocator>& node) { - return node.m_value; - } -}; - -template <typename Value, typename ValueTraitsArg, typename Allocator> -struct LinkedHashSetTraits - : public SimpleClassHashTraits<LinkedHashSetNode<Value, Allocator>> { - STATIC_ONLY(LinkedHashSetTraits); - typedef LinkedHashSetNode<Value, Allocator> Node; - typedef ValueTraitsArg ValueTraits; - - // The slot is empty when the m_next field is zero so it's safe to zero - // the backing. 
- static const bool emptyValueIsZero = true; - - static const bool hasIsEmptyValueFunction = true; - static bool isEmptyValue(const Node& node) { return !node.m_next; } - - static const int deletedValue = -1; - - static void constructDeletedValue(Node& slot, bool) { - slot.m_next = reinterpret_cast<Node*>(deletedValue); - } - static bool isDeletedValue(const Node& slot) { - return slot.m_next == reinterpret_cast<Node*>(deletedValue); - } - - // Whether we need to trace and do weak processing depends on the traits of - // the type inside the node. - template <typename U = void> - struct IsTraceableInCollection { - STATIC_ONLY(IsTraceableInCollection); - static const bool value = - ValueTraits::template IsTraceableInCollection<>::value; - }; - static const WeakHandlingFlag weakHandlingFlag = - ValueTraits::weakHandlingFlag; -}; - -template <typename LinkedHashSetType> -class LinkedHashSetIterator { - DISALLOW_NEW(); - - private: - typedef typename LinkedHashSetType::Node Node; - typedef typename LinkedHashSetType::Traits Traits; - - typedef typename LinkedHashSetType::Value& ReferenceType; - typedef typename LinkedHashSetType::Value* PointerType; - - typedef LinkedHashSetConstIterator<LinkedHashSetType> const_iterator; - - Node* getNode() { return const_cast<Node*>(m_iterator.getNode()); } - - protected: - LinkedHashSetIterator(const Node* position, LinkedHashSetType* m_container) - : m_iterator(position, m_container) {} - - public: - // Default copy, assignment and destructor are OK. - - PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } - ReferenceType operator*() const { return *get(); } - PointerType operator->() const { return get(); } - - LinkedHashSetIterator& operator++() { - ++m_iterator; - return *this; - } - LinkedHashSetIterator& operator--() { - --m_iterator; - return *this; - } - - // Postfix ++ and -- intentionally omitted. - - // Comparison. 
- bool operator==(const LinkedHashSetIterator& other) const { - return m_iterator == other.m_iterator; - } - bool operator!=(const LinkedHashSetIterator& other) const { - return m_iterator != other.m_iterator; - } - - operator const_iterator() const { return m_iterator; } - - protected: - const_iterator m_iterator; - template <typename T, typename U, typename V, typename W> - friend class LinkedHashSet; -}; - -template <typename LinkedHashSetType> -class LinkedHashSetConstIterator { - DISALLOW_NEW(); - - private: - typedef typename LinkedHashSetType::Node Node; - typedef typename LinkedHashSetType::Traits Traits; - - typedef const typename LinkedHashSetType::Value& ReferenceType; - typedef const typename LinkedHashSetType::Value* PointerType; - - const Node* getNode() const { return static_cast<const Node*>(m_position); } - - protected: - LinkedHashSetConstIterator(const LinkedHashSetNodeBase* position, - const LinkedHashSetType* container) - : m_position(position) -#if DCHECK_IS_ON() - , - m_container(container), - m_containerModifications(container->modifications()) -#endif - { - } - - public: - PointerType get() const { - checkModifications(); - return &static_cast<const Node*>(m_position)->m_value; - } - ReferenceType operator*() const { return *get(); } - PointerType operator->() const { return get(); } - - LinkedHashSetConstIterator& operator++() { - DCHECK(m_position); - checkModifications(); - m_position = m_position->m_next; - return *this; - } - - LinkedHashSetConstIterator& operator--() { - DCHECK(m_position); - checkModifications(); - m_position = m_position->m_prev; - return *this; - } - - // Postfix ++ and -- intentionally omitted. - - // Comparison. 
- bool operator==(const LinkedHashSetConstIterator& other) const { - return m_position == other.m_position; - } - bool operator!=(const LinkedHashSetConstIterator& other) const { - return m_position != other.m_position; - } - - private: - const LinkedHashSetNodeBase* m_position; -#if DCHECK_IS_ON() - void checkModifications() const { - m_container->checkModifications(m_containerModifications); - } - const LinkedHashSetType* m_container; - int64_t m_containerModifications; -#else - void checkModifications() const {} -#endif - template <typename T, typename U, typename V, typename W> - friend class LinkedHashSet; - friend class LinkedHashSetIterator<LinkedHashSetType>; -}; - -template <typename LinkedHashSetType> -class LinkedHashSetReverseIterator - : public LinkedHashSetIterator<LinkedHashSetType> { - typedef LinkedHashSetIterator<LinkedHashSetType> Superclass; - typedef LinkedHashSetConstReverseIterator<LinkedHashSetType> - const_reverse_iterator; - typedef typename LinkedHashSetType::Node Node; - - protected: - LinkedHashSetReverseIterator(const Node* position, - LinkedHashSetType* container) - : Superclass(position, container) {} - - public: - LinkedHashSetReverseIterator& operator++() { - Superclass::operator--(); - return *this; - } - LinkedHashSetReverseIterator& operator--() { - Superclass::operator++(); - return *this; - } - - // Postfix ++ and -- intentionally omitted. 
- - operator const_reverse_iterator() const { - return *reinterpret_cast<const_reverse_iterator*>(this); - } - - template <typename T, typename U, typename V, typename W> - friend class LinkedHashSet; -}; - -template <typename LinkedHashSetType> -class LinkedHashSetConstReverseIterator - : public LinkedHashSetConstIterator<LinkedHashSetType> { - typedef LinkedHashSetConstIterator<LinkedHashSetType> Superclass; - typedef typename LinkedHashSetType::Node Node; - - public: - LinkedHashSetConstReverseIterator(const Node* position, - const LinkedHashSetType* container) - : Superclass(position, container) {} - - LinkedHashSetConstReverseIterator& operator++() { - Superclass::operator--(); - return *this; - } - LinkedHashSetConstReverseIterator& operator--() { - Superclass::operator++(); - return *this; - } - - // Postfix ++ and -- intentionally omitted. - - template <typename T, typename U, typename V, typename W> - friend class LinkedHashSet; -}; - -template <typename T, typename U, typename V, typename Allocator> -inline LinkedHashSet<T, U, V, Allocator>::LinkedHashSet() { - static_assert( - Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<T>::value, - "Cannot put raw pointers to garbage-collected classes into " - "an off-heap LinkedHashSet. 
Use HeapLinkedHashSet<Member<T>> instead."); -} - -template <typename T, typename U, typename V, typename W> -inline LinkedHashSet<T, U, V, W>::LinkedHashSet(const LinkedHashSet& other) - : m_anchor() { - const_iterator end = other.end(); - for (const_iterator it = other.begin(); it != end; ++it) - insert(*it); -} - -template <typename T, typename U, typename V, typename W> -inline LinkedHashSet<T, U, V, W>::LinkedHashSet(LinkedHashSet&& other) - : m_anchor() { - swap(other); -} - -template <typename T, typename U, typename V, typename W> -inline LinkedHashSet<T, U, V, W>& LinkedHashSet<T, U, V, W>::operator=( - const LinkedHashSet& other) { - LinkedHashSet tmp(other); - swap(tmp); - return *this; -} - -template <typename T, typename U, typename V, typename W> -inline LinkedHashSet<T, U, V, W>& LinkedHashSet<T, U, V, W>::operator=( - LinkedHashSet&& other) { - swap(other); - return *this; -} - -template <typename T, typename U, typename V, typename W> -inline void LinkedHashSet<T, U, V, W>::swap(LinkedHashSet& other) { - m_impl.swap(other.m_impl); - swapAnchor(m_anchor, other.m_anchor); -} - -template <typename T, typename U, typename V, typename Allocator> -inline LinkedHashSet<T, U, V, Allocator>::~LinkedHashSet() { - // The destructor of m_anchor will implicitly be called here, which will - // unlink the anchor from the collection. 
-} - -template <typename T, typename U, typename V, typename W> -inline T& LinkedHashSet<T, U, V, W>::front() { - DCHECK(!isEmpty()); - return firstNode()->m_value; -} - -template <typename T, typename U, typename V, typename W> -inline const T& LinkedHashSet<T, U, V, W>::front() const { - DCHECK(!isEmpty()); - return firstNode()->m_value; -} - -template <typename T, typename U, typename V, typename W> -inline void LinkedHashSet<T, U, V, W>::removeFirst() { - DCHECK(!isEmpty()); - m_impl.remove(static_cast<Node*>(m_anchor.m_next)); -} - -template <typename T, typename U, typename V, typename W> -inline T& LinkedHashSet<T, U, V, W>::back() { - DCHECK(!isEmpty()); - return lastNode()->m_value; -} - -template <typename T, typename U, typename V, typename W> -inline const T& LinkedHashSet<T, U, V, W>::back() const { - DCHECK(!isEmpty()); - return lastNode()->m_value; -} - -template <typename T, typename U, typename V, typename W> -inline void LinkedHashSet<T, U, V, W>::pop_back() { - DCHECK(!isEmpty()); - m_impl.remove(static_cast<Node*>(m_anchor.m_prev)); -} - -template <typename T, typename U, typename V, typename W> -inline typename LinkedHashSet<T, U, V, W>::iterator -LinkedHashSet<T, U, V, W>::find(ValuePeekInType value) { - LinkedHashSet::Node* node = - m_impl.template lookup<LinkedHashSet::NodeHashFunctions, ValuePeekInType>( - value); - if (!node) - return end(); - return makeIterator(node); -} - -template <typename T, typename U, typename V, typename W> -inline typename LinkedHashSet<T, U, V, W>::const_iterator -LinkedHashSet<T, U, V, W>::find(ValuePeekInType value) const { - const LinkedHashSet::Node* node = - m_impl.template lookup<LinkedHashSet::NodeHashFunctions, ValuePeekInType>( - value); - if (!node) - return end(); - return makeConstIterator(node); -} - -template <typename Translator> -struct LinkedHashSetTranslatorAdapter { - STATIC_ONLY(LinkedHashSetTranslatorAdapter); - template <typename T> - static unsigned hash(const T& key) { - return 
Translator::hash(key); - } - template <typename T, typename U> - static bool equal(const T& a, const U& b) { - return Translator::equal(a.m_value, b); - } -}; - -template <typename Value, typename U, typename V, typename W> -template <typename HashTranslator, typename T> -inline typename LinkedHashSet<Value, U, V, W>::iterator -LinkedHashSet<Value, U, V, W>::find(const T& value) { - typedef LinkedHashSetTranslatorAdapter<HashTranslator> TranslatedFunctions; - const LinkedHashSet::Node* node = - m_impl.template lookup<TranslatedFunctions, const T&>(value); - if (!node) - return end(); - return makeIterator(node); -} - -template <typename Value, typename U, typename V, typename W> -template <typename HashTranslator, typename T> -inline typename LinkedHashSet<Value, U, V, W>::const_iterator -LinkedHashSet<Value, U, V, W>::find(const T& value) const { - typedef LinkedHashSetTranslatorAdapter<HashTranslator> TranslatedFunctions; - const LinkedHashSet::Node* node = - m_impl.template lookup<TranslatedFunctions, const T&>(value); - if (!node) - return end(); - return makeConstIterator(node); -} - -template <typename Value, typename U, typename V, typename W> -template <typename HashTranslator, typename T> -inline bool LinkedHashSet<Value, U, V, W>::contains(const T& value) const { - return m_impl - .template contains<LinkedHashSetTranslatorAdapter<HashTranslator>>(value); -} - -template <typename T, typename U, typename V, typename W> -inline bool LinkedHashSet<T, U, V, W>::contains(ValuePeekInType value) const { - return m_impl.template contains<NodeHashFunctions>(value); -} - -template <typename Value, - typename HashFunctions, - typename Traits, - typename Allocator> -template <typename IncomingValueType> -typename LinkedHashSet<Value, HashFunctions, Traits, Allocator>::AddResult -LinkedHashSet<Value, HashFunctions, Traits, Allocator>::insert( - IncomingValueType&& value) { - return m_impl.template add<NodeHashFunctions>( - std::forward<IncomingValueType>(value), 
&m_anchor); -} - -template <typename T, typename U, typename V, typename W> -template <typename IncomingValueType> -typename LinkedHashSet<T, U, V, W>::iterator -LinkedHashSet<T, U, V, W>::addReturnIterator(IncomingValueType&& value) { - typename ImplType::AddResult result = m_impl.template add<NodeHashFunctions>( - std::forward<IncomingValueType>(value), &m_anchor); - return makeIterator(result.storedValue); -} - -template <typename T, typename U, typename V, typename W> -template <typename IncomingValueType> -typename LinkedHashSet<T, U, V, W>::AddResult -LinkedHashSet<T, U, V, W>::appendOrMoveToLast(IncomingValueType&& value) { - typename ImplType::AddResult result = m_impl.template add<NodeHashFunctions>( - std::forward<IncomingValueType>(value), &m_anchor); - Node* node = result.storedValue; - if (!result.isNewEntry) { - node->unlink(); - m_anchor.insertBefore(*node); - } - return result; -} - -template <typename T, typename U, typename V, typename W> -template <typename IncomingValueType> -typename LinkedHashSet<T, U, V, W>::AddResult -LinkedHashSet<T, U, V, W>::prependOrMoveToFirst(IncomingValueType&& value) { - typename ImplType::AddResult result = m_impl.template add<NodeHashFunctions>( - std::forward<IncomingValueType>(value), m_anchor.m_next); - Node* node = result.storedValue; - if (!result.isNewEntry) { - node->unlink(); - m_anchor.insertAfter(*node); - } - return result; -} - -template <typename T, typename U, typename V, typename W> -template <typename IncomingValueType> -typename LinkedHashSet<T, U, V, W>::AddResult -LinkedHashSet<T, U, V, W>::insertBefore(ValuePeekInType beforeValue, - IncomingValueType&& newValue) { - return insertBefore(find(beforeValue), - std::forward<IncomingValueType>(newValue)); -} - -template <typename T, typename U, typename V, typename W> -inline void LinkedHashSet<T, U, V, W>::erase(iterator it) { - if (it == end()) - return; - m_impl.remove(it.getNode()); -} - -template <typename T, typename U, typename V, typename W> 
-inline void LinkedHashSet<T, U, V, W>::erase(ValuePeekInType value) { - erase(find(value)); -} - -inline void swapAnchor(LinkedHashSetNodeBase& a, LinkedHashSetNodeBase& b) { - DCHECK(a.m_prev); - DCHECK(a.m_next); - DCHECK(b.m_prev); - DCHECK(b.m_next); - swap(a.m_prev, b.m_prev); - swap(a.m_next, b.m_next); - if (b.m_next == &a) { - DCHECK_EQ(b.m_prev, &a); - b.m_next = &b; - b.m_prev = &b; - } else { - b.m_next->m_prev = &b; - b.m_prev->m_next = &b; - } - if (a.m_next == &b) { - DCHECK_EQ(a.m_prev, &b); - a.m_next = &a; - a.m_prev = &a; - } else { - a.m_next->m_prev = &a; - a.m_prev->m_next = &a; - } -} - -inline void swap(LinkedHashSetNodeBase& a, LinkedHashSetNodeBase& b) { - DCHECK_NE(a.m_next, &a); - DCHECK_NE(b.m_next, &b); - swap(a.m_prev, b.m_prev); - swap(a.m_next, b.m_next); - if (b.m_next) { - b.m_next->m_prev = &b; - b.m_prev->m_next = &b; - } - if (a.m_next) { - a.m_next->m_prev = &a; - a.m_prev->m_next = &a; - } -} - -template <typename T, typename Allocator> -inline void swap(LinkedHashSetNode<T, Allocator>& a, - LinkedHashSetNode<T, Allocator>& b) { - typedef LinkedHashSetNodeBase Base; - // The key and value cannot be swapped atomically, and it would be - // wrong to have a GC when only one was swapped and the other still - // contained garbage (eg. from a previous use of the same slot). - // Therefore we forbid a GC until both the key and the value are - // swapped. - Allocator::enterGCForbiddenScope(); - swap(static_cast<Base&>(a), static_cast<Base&>(b)); - swap(a.m_value, b.m_value); - Allocator::leaveGCForbiddenScope(); -} - -} // namespace WTF - -using WTF::LinkedHashSet; - -#endif /* WTF_LinkedHashSet_h */ +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/ListHashSet.h b/third_party/WebKit/Source/wtf/ListHashSet.h index 3346381..0cc0ebe0 100644 --- a/third_party/WebKit/Source/wtf/ListHashSet.h +++ b/third_party/WebKit/Source/wtf/ListHashSet.h
@@ -1,1135 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2007, 2008, 2011, 2012 Apple Inc. All rights - * reserved. - * Copyright (C) 2011, Benjamin Poulain <ikipou@gmail.com> - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_ListHashSet_h -#define WTF_ListHashSet_h +#include "platform/wtf/ListHashSet.h" -#include "wtf/HashSet.h" -#include "wtf/allocator/PartitionAllocator.h" -#include <memory> - -namespace WTF { - -// ListHashSet: Just like HashSet, this class provides a Set interface - a -// collection of unique objects with O(1) insertion, removal and test for -// containership. However, it also has an order - iterating it will always give -// back values in the order in which they are added. - -// Unlike iteration of most WTF Hash data structures, iteration is guaranteed -// safe against mutation of the ListHashSet, except for removal of the item -// currently pointed to by a given iterator. 
- -template <typename Value, - size_t inlineCapacity, - typename HashFunctions, - typename Allocator> -class ListHashSet; - -template <typename Set> -class ListHashSetIterator; -template <typename Set> -class ListHashSetConstIterator; -template <typename Set> -class ListHashSetReverseIterator; -template <typename Set> -class ListHashSetConstReverseIterator; - -template <typename ValueArg> -class ListHashSetNodeBase; -template <typename ValueArg, typename Allocator> -class ListHashSetNode; -template <typename ValueArg, size_t inlineCapacity> -struct ListHashSetAllocator; - -template <typename HashArg> -struct ListHashSetNodeHashFunctions; -template <typename HashArg> -struct ListHashSetTranslator; - -// Note that for a ListHashSet you cannot specify the HashTraits as a template -// argument. It uses the default hash traits for the ValueArg type. -template <typename ValueArg, - size_t inlineCapacity = 256, - typename HashArg = typename DefaultHash<ValueArg>::Hash, - typename AllocatorArg = - ListHashSetAllocator<ValueArg, inlineCapacity>> -class ListHashSet - : public ConditionalDestructor< - ListHashSet<ValueArg, inlineCapacity, HashArg, AllocatorArg>, - AllocatorArg::isGarbageCollected> { - typedef AllocatorArg Allocator; - USE_ALLOCATOR(ListHashSet, Allocator); - - typedef ListHashSetNode<ValueArg, Allocator> Node; - typedef HashTraits<Node*> NodeTraits; - typedef ListHashSetNodeHashFunctions<HashArg> NodeHash; - typedef ListHashSetTranslator<HashArg> BaseTranslator; - - typedef HashTable<Node*, - Node*, - IdentityExtractor, - NodeHash, - NodeTraits, - NodeTraits, - typename Allocator::TableAllocator> - ImplType; - typedef HashTableIterator<Node*, - Node*, - IdentityExtractor, - NodeHash, - NodeTraits, - NodeTraits, - typename Allocator::TableAllocator> - ImplTypeIterator; - typedef HashTableConstIterator<Node*, - Node*, - IdentityExtractor, - NodeHash, - NodeTraits, - NodeTraits, - typename Allocator::TableAllocator> - ImplTypeConstIterator; - - typedef HashArg 
HashFunctions; - - public: - typedef ValueArg ValueType; - typedef HashTraits<ValueType> ValueTraits; - typedef typename ValueTraits::PeekInType ValuePeekInType; - - typedef ListHashSetIterator<ListHashSet> iterator; - typedef ListHashSetConstIterator<ListHashSet> const_iterator; - friend class ListHashSetIterator<ListHashSet>; - friend class ListHashSetConstIterator<ListHashSet>; - - typedef ListHashSetReverseIterator<ListHashSet> reverse_iterator; - typedef ListHashSetConstReverseIterator<ListHashSet> const_reverse_iterator; - friend class ListHashSetReverseIterator<ListHashSet>; - friend class ListHashSetConstReverseIterator<ListHashSet>; - - struct AddResult final { - STACK_ALLOCATED(); - friend class ListHashSet<ValueArg, inlineCapacity, HashArg, AllocatorArg>; - AddResult(Node* node, bool isNewEntry) - : storedValue(&node->m_value), isNewEntry(isNewEntry), m_node(node) {} - ValueType* storedValue; - bool isNewEntry; - - private: - Node* m_node; - }; - - ListHashSet(); - ListHashSet(const ListHashSet&); - ListHashSet(ListHashSet&&); - ListHashSet& operator=(const ListHashSet&); - ListHashSet& operator=(ListHashSet&&); - void finalize(); - - void swap(ListHashSet&); - - unsigned size() const { return m_impl.size(); } - unsigned capacity() const { return m_impl.capacity(); } - bool isEmpty() const { return m_impl.isEmpty(); } - - iterator begin() { return makeIterator(m_head); } - iterator end() { return makeIterator(0); } - const_iterator begin() const { return makeConstIterator(m_head); } - const_iterator end() const { return makeConstIterator(0); } - - reverse_iterator rbegin() { return makeReverseIterator(m_tail); } - reverse_iterator rend() { return makeReverseIterator(0); } - const_reverse_iterator rbegin() const { - return makeConstReverseIterator(m_tail); - } - const_reverse_iterator rend() const { return makeConstReverseIterator(0); } - - ValueType& front(); - const ValueType& front() const; - void removeFirst(); - - ValueType& back(); - const 
ValueType& back() const; - void pop_back(); - - iterator find(ValuePeekInType); - const_iterator find(ValuePeekInType) const; - bool contains(ValuePeekInType) const; - - // An alternate version of find() that finds the object by hashing and - // comparing with some other type, to avoid the cost of type conversion. - // The HashTranslator interface is defined in HashSet. - template <typename HashTranslator, typename T> - iterator find(const T&); - template <typename HashTranslator, typename T> - const_iterator find(const T&) const; - template <typename HashTranslator, typename T> - bool contains(const T&) const; - - // The return value of insert is a pair of a pointer to the stored value, and - // a bool that is true if an new entry was added. - template <typename IncomingValueType> - AddResult insert(IncomingValueType&&); - - // Same as insert() except that the return value is an iterator. Useful in - // cases where it's needed to have the same return value as find() and where - // it's not possible to use a pointer to the storedValue. - template <typename IncomingValueType> - iterator addReturnIterator(IncomingValueType&&); - - // Add the value to the end of the collection. If the value was already in - // the list, it is moved to the end. - template <typename IncomingValueType> - AddResult appendOrMoveToLast(IncomingValueType&&); - - // Add the value to the beginning of the collection. If the value was - // already in the list, it is moved to the beginning. 
- template <typename IncomingValueType> - AddResult prependOrMoveToFirst(IncomingValueType&&); - - template <typename IncomingValueType> - AddResult insertBefore(ValuePeekInType beforeValue, - IncomingValueType&& newValue); - template <typename IncomingValueType> - AddResult insertBefore(iterator, IncomingValueType&&); - - void erase(ValuePeekInType value) { return erase(find(value)); } - void erase(iterator); - void clear(); - template <typename Collection> - void removeAll(const Collection& other) { - WTF::removeAll(*this, other); - } - - ValueType take(iterator); - ValueType take(ValuePeekInType); - ValueType takeFirst(); - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher); - - private: - void unlink(Node*); - void unlinkAndDelete(Node*); - void appendNode(Node*); - void prependNode(Node*); - void insertNodeBefore(Node* beforeNode, Node* newNode); - void deleteAllNodes(); - Allocator* getAllocator() const { return m_allocatorProvider.get(); } - void createAllocatorIfNeeded() { - m_allocatorProvider.createAllocatorIfNeeded(); - } - void deallocate(Node* node) const { m_allocatorProvider.deallocate(node); } - - iterator makeIterator(Node* position) { return iterator(this, position); } - const_iterator makeConstIterator(Node* position) const { - return const_iterator(this, position); - } - reverse_iterator makeReverseIterator(Node* position) { - return reverse_iterator(this, position); - } - const_reverse_iterator makeConstReverseIterator(Node* position) const { - return const_reverse_iterator(this, position); - } - - ImplType m_impl; - Node* m_head; - Node* m_tail; - typename Allocator::AllocatorProvider m_allocatorProvider; -}; - -// ListHashSetNode has this base class to hold the members because the MSVC -// compiler otherwise gets into circular template dependencies when trying to do -// sizeof on a node. 
-template <typename ValueArg> -class ListHashSetNodeBase { - DISALLOW_NEW(); - - protected: - template <typename U> - explicit ListHashSetNodeBase(U&& value) : m_value(std::forward<U>(value)) {} - - public: - ValueArg m_value; - ListHashSetNodeBase* m_prev = nullptr; - ListHashSetNodeBase* m_next = nullptr; -#if DCHECK_IS_ON() - bool m_isAllocated = true; -#endif -}; - -// This allocator is only used for non-Heap ListHashSets. -template <typename ValueArg, size_t inlineCapacity> -struct ListHashSetAllocator : public PartitionAllocator { - typedef PartitionAllocator TableAllocator; - typedef ListHashSetNode<ValueArg, ListHashSetAllocator> Node; - typedef ListHashSetNodeBase<ValueArg> NodeBase; - - class AllocatorProvider { - DISALLOW_NEW(); - - public: - AllocatorProvider() : m_allocator(nullptr) {} - void createAllocatorIfNeeded() { - if (!m_allocator) - m_allocator = new ListHashSetAllocator; - } - - void releaseAllocator() { - delete m_allocator; - m_allocator = nullptr; - } - - void swap(AllocatorProvider& other) { - std::swap(m_allocator, other.m_allocator); - } - - void deallocate(Node* node) const { - DCHECK(m_allocator); - m_allocator->deallocate(node); - } - - ListHashSetAllocator* get() const { - DCHECK(m_allocator); - return m_allocator; - } - - private: - // Not using std::unique_ptr as this pointer should be deleted at - // releaseAllocator() method rather than at destructor. 
- ListHashSetAllocator* m_allocator; - }; - - ListHashSetAllocator() - : m_freeList(pool()), m_isDoneWithInitialFreeList(false) { - memset(m_pool.buffer, 0, sizeof(m_pool.buffer)); - } - - Node* allocateNode() { - Node* result = m_freeList; - - if (!result) - return static_cast<Node*>(WTF::Partitions::fastMalloc( - sizeof(NodeBase), WTF_HEAP_PROFILER_TYPE_NAME(Node))); - -#if DCHECK_IS_ON() - DCHECK(!result->m_isAllocated); -#endif - - Node* next = result->next(); -#if DCHECK_IS_ON() - DCHECK(!next || !next->m_isAllocated); -#endif - if (!next && !m_isDoneWithInitialFreeList) { - next = result + 1; - if (next == pastPool()) { - m_isDoneWithInitialFreeList = true; - next = nullptr; - } else { - DCHECK(inPool(next)); -#if DCHECK_IS_ON() - DCHECK(!next->m_isAllocated); -#endif - } - } - m_freeList = next; - - return result; - } - - void deallocate(Node* node) { - if (inPool(node)) { -#if DCHECK_IS_ON() - node->m_isAllocated = false; -#endif - node->m_next = m_freeList; - m_freeList = node; - return; - } - - WTF::Partitions::fastFree(node); - } - - bool inPool(Node* node) { return node >= pool() && node < pastPool(); } - - static void traceValue(typename PartitionAllocator::Visitor* visitor, - Node* node) {} - - private: - Node* pool() { return reinterpret_cast_ptr<Node*>(m_pool.buffer); } - Node* pastPool() { return pool() + m_poolSize; } - - Node* m_freeList; - bool m_isDoneWithInitialFreeList; -#if defined(MEMORY_SANITIZER_INITIAL_SIZE) - // The allocation pool for nodes is one big chunk that ASAN has no insight - // into, so it can cloak errors. Make it as small as possible to force nodes - // to be allocated individually where ASAN can see them. 
- static const size_t m_poolSize = 1; -#else - static const size_t m_poolSize = inlineCapacity; -#endif - AlignedBuffer<sizeof(NodeBase) * m_poolSize, WTF_ALIGN_OF(NodeBase)> m_pool; -}; - -template <typename ValueArg, typename AllocatorArg> -class ListHashSetNode : public ListHashSetNodeBase<ValueArg> { - public: - typedef AllocatorArg NodeAllocator; - typedef ValueArg Value; - - template <typename U> - ListHashSetNode(U&& value) - : ListHashSetNodeBase<ValueArg>(std::forward<U>(value)) {} - - void* operator new(size_t, NodeAllocator* allocator) { - static_assert( - sizeof(ListHashSetNode) == sizeof(ListHashSetNodeBase<ValueArg>), - "please add any fields to the base"); - return allocator->allocateNode(); - } - - void setWasAlreadyDestructed() { - if (NodeAllocator::isGarbageCollected && - !IsTriviallyDestructible<ValueArg>::value) - this->m_prev = unlinkedNodePointer(); - } - - bool wasAlreadyDestructed() const { - DCHECK(NodeAllocator::isGarbageCollected); - return this->m_prev == unlinkedNodePointer(); - } - - static void finalize(void* pointer) { - // No need to waste time calling finalize if it's not needed. - DCHECK(!IsTriviallyDestructible<ValueArg>::value); - ListHashSetNode* self = reinterpret_cast_ptr<ListHashSetNode*>(pointer); - - // Check whether this node was already destructed before being unlinked - // from the collection. - if (self->wasAlreadyDestructed()) - return; - - self->m_value.~ValueArg(); - } - void finalizeGarbageCollectedObject() { finalize(this); } - - void destroy(NodeAllocator* allocator) { - this->~ListHashSetNode(); - setWasAlreadyDestructed(); - allocator->deallocate(this); - } - - // This is not called in normal tracing, but it is called if we find a - // pointer to a node on the stack using conservative scanning. Since the - // original ListHashSet may no longer exist we make sure to mark the - // neighbours in the chain too. 
- template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - // The conservative stack scan can find nodes that have been removed - // from the set and destructed. We don't need to trace these, and it - // would be wrong to do so, because the class will not expect the trace - // method to be called after the destructor. It's an error to remove a - // node from the ListHashSet while an iterator is positioned at that - // node, so there should be no valid pointers from the stack to a - // destructed node. - if (wasAlreadyDestructed()) - return; - NodeAllocator::traceValue(visitor, this); - visitor->mark(next()); - visitor->mark(prev()); - } - - ListHashSetNode* next() const { - return reinterpret_cast<ListHashSetNode*>(this->m_next); - } - ListHashSetNode* prev() const { - return reinterpret_cast<ListHashSetNode*>(this->m_prev); - } - - // Don't add fields here, the ListHashSetNodeBase and this should have the - // same size. - - static ListHashSetNode* unlinkedNodePointer() { - return reinterpret_cast<ListHashSetNode*>(-1); - } - - template <typename HashArg> - friend struct ListHashSetNodeHashFunctions; -}; - -template <typename HashArg> -struct ListHashSetNodeHashFunctions { - STATIC_ONLY(ListHashSetNodeHashFunctions); - template <typename T> - static unsigned hash(const T& key) { - return HashArg::hash(key->m_value); - } - template <typename T> - static bool equal(const T& a, const T& b) { - return HashArg::equal(a->m_value, b->m_value); - } - static const bool safeToCompareToEmptyOrDeleted = false; -}; - -template <typename Set> -class ListHashSetIterator { - DISALLOW_NEW(); - - private: - typedef typename Set::const_iterator const_iterator; - typedef typename Set::Node Node; - typedef typename Set::ValueType ValueType; - typedef ValueType& ReferenceType; - typedef ValueType* PointerType; - - ListHashSetIterator(const Set* set, Node* position) - : m_iterator(set, position) {} - - public: - ListHashSetIterator() {} - - // default copy, 
assignment and destructor are OK - - PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } - ReferenceType operator*() const { return *get(); } - PointerType operator->() const { return get(); } - - ListHashSetIterator& operator++() { - ++m_iterator; - return *this; - } - ListHashSetIterator& operator--() { - --m_iterator; - return *this; - } - - // Postfix ++ and -- intentionally omitted. - - // Comparison. - bool operator==(const ListHashSetIterator& other) const { - return m_iterator == other.m_iterator; - } - bool operator!=(const ListHashSetIterator& other) const { - return m_iterator != other.m_iterator; - } - - operator const_iterator() const { return m_iterator; } - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - m_iterator.trace(visitor); - } - - private: - Node* getNode() { return m_iterator.getNode(); } - - const_iterator m_iterator; - - template <typename T, size_t inlineCapacity, typename U, typename V> - friend class ListHashSet; -}; - -template <typename Set> -class ListHashSetConstIterator { - DISALLOW_NEW(); - - private: - typedef typename Set::const_iterator const_iterator; - typedef typename Set::Node Node; - typedef typename Set::ValueType ValueType; - typedef const ValueType& ReferenceType; - typedef const ValueType* PointerType; - - friend class ListHashSetIterator<Set>; - - ListHashSetConstIterator(const Set* set, Node* position) - : m_set(set), m_position(position) {} - - public: - ListHashSetConstIterator() {} - - PointerType get() const { return &m_position->m_value; } - ReferenceType operator*() const { return *get(); } - PointerType operator->() const { return get(); } - - ListHashSetConstIterator& operator++() { - DCHECK(m_position); - m_position = m_position->next(); - return *this; - } - - ListHashSetConstIterator& operator--() { - DCHECK_NE(m_position, m_set->m_head); - if (!m_position) - m_position = m_set->m_tail; - else - m_position = m_position->prev(); - return *this; - 
} - - // Postfix ++ and -- intentionally omitted. - - // Comparison. - bool operator==(const ListHashSetConstIterator& other) const { - return m_position == other.m_position; - } - bool operator!=(const ListHashSetConstIterator& other) const { - return m_position != other.m_position; - } - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - visitor->trace(*m_set); - visitor->trace(m_position); - } - - private: - Node* getNode() { return m_position; } - - const Set* m_set; - Node* m_position; - - template <typename T, size_t inlineCapacity, typename U, typename V> - friend class ListHashSet; -}; - -template <typename Set> -class ListHashSetReverseIterator { - DISALLOW_NEW(); - - private: - typedef typename Set::const_reverse_iterator const_reverse_iterator; - typedef typename Set::Node Node; - typedef typename Set::ValueType ValueType; - typedef ValueType& ReferenceType; - typedef ValueType* PointerType; - - ListHashSetReverseIterator(const Set* set, Node* position) - : m_iterator(set, position) {} - - public: - ListHashSetReverseIterator() {} - - // default copy, assignment and destructor are OK - - PointerType get() const { return const_cast<PointerType>(m_iterator.get()); } - ReferenceType operator*() const { return *get(); } - PointerType operator->() const { return get(); } - - ListHashSetReverseIterator& operator++() { - ++m_iterator; - return *this; - } - ListHashSetReverseIterator& operator--() { - --m_iterator; - return *this; - } - - // Postfix ++ and -- intentionally omitted. - - // Comparison. 
- bool operator==(const ListHashSetReverseIterator& other) const { - return m_iterator == other.m_iterator; - } - bool operator!=(const ListHashSetReverseIterator& other) const { - return m_iterator != other.m_iterator; - } - - operator const_reverse_iterator() const { return m_iterator; } - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - m_iterator.trace(visitor); - } - - private: - Node* getNode() { return m_iterator.node(); } - - const_reverse_iterator m_iterator; - - template <typename T, size_t inlineCapacity, typename U, typename V> - friend class ListHashSet; -}; - -template <typename Set> -class ListHashSetConstReverseIterator { - DISALLOW_NEW(); - - private: - typedef typename Set::reverse_iterator reverse_iterator; - typedef typename Set::Node Node; - typedef typename Set::ValueType ValueType; - typedef const ValueType& ReferenceType; - typedef const ValueType* PointerType; - - friend class ListHashSetReverseIterator<Set>; - - ListHashSetConstReverseIterator(const Set* set, Node* position) - : m_set(set), m_position(position) {} - - public: - ListHashSetConstReverseIterator() {} - - PointerType get() const { return &m_position->m_value; } - ReferenceType operator*() const { return *get(); } - PointerType operator->() const { return get(); } - - ListHashSetConstReverseIterator& operator++() { - DCHECK(m_position); - m_position = m_position->prev(); - return *this; - } - - ListHashSetConstReverseIterator& operator--() { - DCHECK_NE(m_position, m_set->m_tail); - if (!m_position) - m_position = m_set->m_head; - else - m_position = m_position->next(); - return *this; - } - - // Postfix ++ and -- intentionally omitted. - - // Comparison. 
- bool operator==(const ListHashSetConstReverseIterator& other) const { - return m_position == other.m_position; - } - bool operator!=(const ListHashSetConstReverseIterator& other) const { - return m_position != other.m_position; - } - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher visitor) { - visitor->trace(*m_set); - visitor->trace(m_position); - } - - private: - Node* getNode() { return m_position; } - - const Set* m_set; - Node* m_position; - - template <typename T, size_t inlineCapacity, typename U, typename V> - friend class ListHashSet; -}; - -template <typename HashFunctions> -struct ListHashSetTranslator { - STATIC_ONLY(ListHashSetTranslator); - template <typename T> - static unsigned hash(const T& key) { - return HashFunctions::hash(key); - } - template <typename T, typename U> - static bool equal(const T& a, const U& b) { - return HashFunctions::equal(a->m_value, b); - } - template <typename T, typename U, typename V> - static void translate(T*& location, U&& key, const V& allocator) { - location = new (const_cast<V*>(&allocator)) T(std::forward<U>(key)); - } -}; - -template <typename T, size_t inlineCapacity, typename U, typename Allocator> -inline ListHashSet<T, inlineCapacity, U, Allocator>::ListHashSet() - : m_head(nullptr), m_tail(nullptr) { - static_assert( - Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<T>::value, - "Cannot put raw pointers to garbage-collected classes into " - "an off-heap ListHashSet. 
Use HeapListHashSet<Member<T>> instead."); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline ListHashSet<T, inlineCapacity, U, V>::ListHashSet( - const ListHashSet& other) - : m_head(nullptr), m_tail(nullptr) { - const_iterator end = other.end(); - for (const_iterator it = other.begin(); it != end; ++it) - insert(*it); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline ListHashSet<T, inlineCapacity, U, V>::ListHashSet(ListHashSet&& other) - : m_head(nullptr), m_tail(nullptr) { - swap(other); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline ListHashSet<T, inlineCapacity, U, V>& -ListHashSet<T, inlineCapacity, U, V>::operator=(const ListHashSet& other) { - ListHashSet tmp(other); - swap(tmp); - return *this; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline ListHashSet<T, inlineCapacity, U, V>& -ListHashSet<T, inlineCapacity, U, V>::operator=(ListHashSet&& other) { - swap(other); - return *this; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline void ListHashSet<T, inlineCapacity, U, V>::swap(ListHashSet& other) { - m_impl.swap(other.m_impl); - std::swap(m_head, other.m_head); - std::swap(m_tail, other.m_tail); - m_allocatorProvider.swap(other.m_allocatorProvider); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline void ListHashSet<T, inlineCapacity, U, V>::finalize() { - static_assert(!Allocator::isGarbageCollected, - "heap allocated ListHashSet should never call finalize()"); - deleteAllNodes(); - m_allocatorProvider.releaseAllocator(); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline T& ListHashSet<T, inlineCapacity, U, V>::front() { - DCHECK(!isEmpty()); - return m_head->m_value; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline void ListHashSet<T, inlineCapacity, U, V>::removeFirst() { - 
DCHECK(!isEmpty()); - m_impl.remove(m_head); - unlinkAndDelete(m_head); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline const T& ListHashSet<T, inlineCapacity, U, V>::front() const { - DCHECK(!isEmpty()); - return m_head->m_value; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline T& ListHashSet<T, inlineCapacity, U, V>::back() { - DCHECK(!isEmpty()); - return m_tail->m_value; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline const T& ListHashSet<T, inlineCapacity, U, V>::back() const { - DCHECK(!isEmpty()); - return m_tail->m_value; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline void ListHashSet<T, inlineCapacity, U, V>::pop_back() { - DCHECK(!isEmpty()); - m_impl.remove(m_tail); - unlinkAndDelete(m_tail); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline typename ListHashSet<T, inlineCapacity, U, V>::iterator -ListHashSet<T, inlineCapacity, U, V>::find(ValuePeekInType value) { - ImplTypeIterator it = m_impl.template find<BaseTranslator>(value); - if (it == m_impl.end()) - return end(); - return makeIterator(*it); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline typename ListHashSet<T, inlineCapacity, U, V>::const_iterator -ListHashSet<T, inlineCapacity, U, V>::find(ValuePeekInType value) const { - ImplTypeConstIterator it = m_impl.template find<BaseTranslator>(value); - if (it == m_impl.end()) - return end(); - return makeConstIterator(*it); -} - -template <typename Translator> -struct ListHashSetTranslatorAdapter { - STATIC_ONLY(ListHashSetTranslatorAdapter); - template <typename T> - static unsigned hash(const T& key) { - return Translator::hash(key); - } - template <typename T, typename U> - static bool equal(const T& a, const U& b) { - return Translator::equal(a->m_value, b); - } -}; - -template <typename ValueType, size_t inlineCapacity, typename U, typename 
V> -template <typename HashTranslator, typename T> -inline typename ListHashSet<ValueType, inlineCapacity, U, V>::iterator -ListHashSet<ValueType, inlineCapacity, U, V>::find(const T& value) { - ImplTypeConstIterator it = - m_impl.template find<ListHashSetTranslatorAdapter<HashTranslator>>(value); - if (it == m_impl.end()) - return end(); - return makeIterator(*it); -} - -template <typename ValueType, size_t inlineCapacity, typename U, typename V> -template <typename HashTranslator, typename T> -inline typename ListHashSet<ValueType, inlineCapacity, U, V>::const_iterator -ListHashSet<ValueType, inlineCapacity, U, V>::find(const T& value) const { - ImplTypeConstIterator it = - m_impl.template find<ListHashSetTranslatorAdapter<HashTranslator>>(value); - if (it == m_impl.end()) - return end(); - return makeConstIterator(*it); -} - -template <typename ValueType, size_t inlineCapacity, typename U, typename V> -template <typename HashTranslator, typename T> -inline bool ListHashSet<ValueType, inlineCapacity, U, V>::contains( - const T& value) const { - return m_impl.template contains<ListHashSetTranslatorAdapter<HashTranslator>>( - value); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline bool ListHashSet<T, inlineCapacity, U, V>::contains( - ValuePeekInType value) const { - return m_impl.template contains<BaseTranslator>(value); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -template <typename IncomingValueType> -typename ListHashSet<T, inlineCapacity, U, V>::AddResult -ListHashSet<T, inlineCapacity, U, V>::insert(IncomingValueType&& value) { - createAllocatorIfNeeded(); - // The second argument is a const ref. This is useful for the HashTable - // because it lets it take lvalues by reference, but for our purposes it's - // inconvenient, since it constrains us to be const, whereas the allocator - // actually changes when it does allocations. 
- auto result = m_impl.template add<BaseTranslator>( - std::forward<IncomingValueType>(value), *this->getAllocator()); - if (result.isNewEntry) - appendNode(*result.storedValue); - return AddResult(*result.storedValue, result.isNewEntry); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -template <typename IncomingValueType> -typename ListHashSet<T, inlineCapacity, U, V>::iterator -ListHashSet<T, inlineCapacity, U, V>::addReturnIterator( - IncomingValueType&& value) { - return makeIterator(insert(std::forward<IncomingValueType>(value)).m_node); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -template <typename IncomingValueType> -typename ListHashSet<T, inlineCapacity, U, V>::AddResult -ListHashSet<T, inlineCapacity, U, V>::appendOrMoveToLast( - IncomingValueType&& value) { - createAllocatorIfNeeded(); - auto result = m_impl.template add<BaseTranslator>( - std::forward<IncomingValueType>(value), *this->getAllocator()); - Node* node = *result.storedValue; - if (!result.isNewEntry) - unlink(node); - appendNode(node); - return AddResult(*result.storedValue, result.isNewEntry); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -template <typename IncomingValueType> -typename ListHashSet<T, inlineCapacity, U, V>::AddResult -ListHashSet<T, inlineCapacity, U, V>::prependOrMoveToFirst( - IncomingValueType&& value) { - createAllocatorIfNeeded(); - auto result = m_impl.template add<BaseTranslator>( - std::forward<IncomingValueType>(value), *this->getAllocator()); - Node* node = *result.storedValue; - if (!result.isNewEntry) - unlink(node); - prependNode(node); - return AddResult(*result.storedValue, result.isNewEntry); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -template <typename IncomingValueType> -typename ListHashSet<T, inlineCapacity, U, V>::AddResult -ListHashSet<T, inlineCapacity, U, V>::insertBefore( - iterator it, - IncomingValueType&& newValue) { - 
createAllocatorIfNeeded(); - auto result = m_impl.template add<BaseTranslator>( - std::forward<IncomingValueType>(newValue), *this->getAllocator()); - if (result.isNewEntry) - insertNodeBefore(it.getNode(), *result.storedValue); - return AddResult(*result.storedValue, result.isNewEntry); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -template <typename IncomingValueType> -typename ListHashSet<T, inlineCapacity, U, V>::AddResult -ListHashSet<T, inlineCapacity, U, V>::insertBefore( - ValuePeekInType beforeValue, - IncomingValueType&& newValue) { - createAllocatorIfNeeded(); - return insertBefore(find(beforeValue), - std::forward<IncomingValueType>(newValue)); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline void ListHashSet<T, inlineCapacity, U, V>::erase(iterator it) { - if (it == end()) - return; - m_impl.remove(it.getNode()); - unlinkAndDelete(it.getNode()); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -inline void ListHashSet<T, inlineCapacity, U, V>::clear() { - deleteAllNodes(); - m_impl.clear(); - m_head = nullptr; - m_tail = nullptr; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -auto ListHashSet<T, inlineCapacity, U, V>::take(iterator it) -> ValueType { - if (it == end()) - return ValueTraits::emptyValue(); - - m_impl.remove(it.getNode()); - ValueType result = std::move(it.getNode()->m_value); - unlinkAndDelete(it.getNode()); - - return result; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -auto ListHashSet<T, inlineCapacity, U, V>::take(ValuePeekInType value) - -> ValueType { - return take(find(value)); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -auto ListHashSet<T, inlineCapacity, U, V>::takeFirst() -> ValueType { - DCHECK(!isEmpty()); - m_impl.remove(m_head); - ValueType result = std::move(m_head->m_value); - unlinkAndDelete(m_head); - - return result; -} - -template 
<typename T, size_t inlineCapacity, typename U, typename Allocator> -void ListHashSet<T, inlineCapacity, U, Allocator>::unlink(Node* node) { - if (!node->m_prev) { - DCHECK_EQ(node, m_head); - m_head = node->next(); - } else { - DCHECK_NE(node, m_head); - node->m_prev->m_next = node->m_next; - } - - if (!node->m_next) { - DCHECK_EQ(node, m_tail); - m_tail = node->prev(); - } else { - DCHECK_NE(node, m_tail); - node->m_next->m_prev = node->m_prev; - } -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -void ListHashSet<T, inlineCapacity, U, V>::unlinkAndDelete(Node* node) { - unlink(node); - node->destroy(this->getAllocator()); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -void ListHashSet<T, inlineCapacity, U, V>::appendNode(Node* node) { - node->m_prev = m_tail; - node->m_next = nullptr; - - if (m_tail) { - DCHECK(m_head); - m_tail->m_next = node; - } else { - DCHECK(!m_head); - m_head = node; - } - - m_tail = node; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -void ListHashSet<T, inlineCapacity, U, V>::prependNode(Node* node) { - node->m_prev = nullptr; - node->m_next = m_head; - - if (m_head) - m_head->m_prev = node; - else - m_tail = node; - - m_head = node; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -void ListHashSet<T, inlineCapacity, U, V>::insertNodeBefore(Node* beforeNode, - Node* newNode) { - if (!beforeNode) - return appendNode(newNode); - - newNode->m_next = beforeNode; - newNode->m_prev = beforeNode->m_prev; - if (beforeNode->m_prev) - beforeNode->m_prev->m_next = newNode; - beforeNode->m_prev = newNode; - - if (!newNode->m_prev) - m_head = newNode; -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -void ListHashSet<T, inlineCapacity, U, V>::deleteAllNodes() { - if (!m_head) - return; - - for (Node *node = m_head, *next = m_head->next(); node; - node = next, next = node ? 
node->next() : 0) - node->destroy(this->getAllocator()); -} - -template <typename T, size_t inlineCapacity, typename U, typename V> -template <typename VisitorDispatcher> -void ListHashSet<T, inlineCapacity, U, V>::trace(VisitorDispatcher visitor) { - static_assert(HashTraits<T>::weakHandlingFlag == NoWeakHandlingInCollections, - "HeapListHashSet does not support weakness, consider using " - "HeapLinkedHashSet instead."); - // This marks all the nodes and their contents live that can be accessed - // through the HashTable. That includes m_head and m_tail so we do not have - // to explicitly trace them here. - m_impl.trace(visitor); -} - -} // namespace WTF - -using WTF::ListHashSet; - -#endif // WTF_ListHashSet_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/PrintStream.h b/third_party/WebKit/Source/wtf/PrintStream.h index fc30724..6c4bbda 100644 --- a/third_party/WebKit/Source/wtf/PrintStream.h +++ b/third_party/WebKit/Source/wtf/PrintStream.h
@@ -1,134 +1,9 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
-#ifndef PrintStream_h -#define PrintStream_h +#include "platform/wtf/PrintStream.h" -#include "wtf/Allocator.h" -#include "wtf/Compiler.h" -#include "wtf/Noncopyable.h" -#include "wtf/StdLibExtras.h" -#include "wtf/WTFExport.h" -#include <stdarg.h> - -namespace WTF { - -class CString; -class String; - -class WTF_EXPORT PrintStream { - USING_FAST_MALLOC(PrintStream); - WTF_MAKE_NONCOPYABLE(PrintStream); - - public: - PrintStream(); - virtual ~PrintStream(); - - PRINTF_FORMAT(2, 3) void printf(const char* format, ...); - PRINTF_FORMAT(2, 0) virtual void vprintf(const char* format, va_list) = 0; - - // Typically a no-op for many subclasses of PrintStream, this is a hint that - // the implementation should flush its buffers if it had not done so already. - virtual void flush(); - - template <typename T> - void print(const T& value) { - printInternal(*this, value); - } - - template <typename T1, typename... RemainingTypes> - void print(const T1& value1, const RemainingTypes&... values) { - print(value1); - print(values...); - } -}; - -WTF_EXPORT void printInternal(PrintStream&, const char*); -WTF_EXPORT void printInternal(PrintStream&, const CString&); -WTF_EXPORT void printInternal(PrintStream&, const String&); -inline void printInternal(PrintStream& out, char* value) { - printInternal(out, static_cast<const char*>(value)); -} -inline void printInternal(PrintStream& out, CString& value) { - printInternal(out, static_cast<const CString&>(value)); -} -inline void printInternal(PrintStream& out, String& value) { - printInternal(out, static_cast<const String&>(value)); -} -WTF_EXPORT void printInternal(PrintStream&, bool); -WTF_EXPORT void printInternal(PrintStream&, int); -WTF_EXPORT void printInternal(PrintStream&, unsigned); -WTF_EXPORT void printInternal(PrintStream&, long); -WTF_EXPORT void printInternal(PrintStream&, unsigned long); -WTF_EXPORT void printInternal(PrintStream&, long long); -WTF_EXPORT void printInternal(PrintStream&, unsigned long long); -WTF_EXPORT 
void printInternal(PrintStream&, float); -WTF_EXPORT void printInternal(PrintStream&, double); - -template <typename T> -void printInternal(PrintStream& out, const T& value) { - value.dump(out); -} - -#define MAKE_PRINT_ADAPTOR(Name, Type, function) \ - class Name final { \ - STACK_ALLOCATED(); \ - \ - public: \ - Name(const Type& value) : m_value(value) {} \ - void dump(PrintStream& out) const { function(out, m_value); } \ - \ - private: \ - Type m_value; \ - } - -#define MAKE_PRINT_METHOD_ADAPTOR(Name, Type, method) \ - class Name final { \ - STACK_ALLOCATED(); \ - \ - public: \ - Name(const Type& value) : m_value(value) {} \ - void dump(PrintStream& out) const { m_value.method(out); } \ - \ - private: \ - const Type& m_value; \ - } - -#define MAKE_PRINT_METHOD(Type, dumpMethod, method) \ - MAKE_PRINT_METHOD_ADAPTOR(DumperFor_##method, Type, dumpMethod); \ - DumperFor_##method method() const { return DumperFor_##method(*this); } - -// Use an adaptor-based dumper for characters to avoid situations where -// you've "compressed" an integer to a character and it ends up printing -// as ASCII when you wanted it to print as a number. -void dumpCharacter(PrintStream&, char); -MAKE_PRINT_ADAPTOR(CharacterDump, char, dumpCharacter); - -} // namespace WTF - -using WTF::CharacterDump; -using WTF::PrintStream; - -#endif // PrintStream_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/Vector.h b/third_party/WebKit/Source/wtf/Vector.h index 1a7bba13..eb205364 100644 --- a/third_party/WebKit/Source/wtf/Vector.h +++ b/third_party/WebKit/Source/wtf/Vector.h
@@ -1,1910 +1,9 @@ -/* - * Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_Vector_h -#define WTF_Vector_h +#include "platform/wtf/Vector.h" -#include "wtf/Alignment.h" -#include "wtf/ConditionalDestructor.h" -#include "wtf/ContainerAnnotations.h" -#include "wtf/Noncopyable.h" -#include "wtf/NotFound.h" -#include "wtf/StdLibExtras.h" -#include "wtf/VectorTraits.h" -#include "wtf/allocator/PartitionAllocator.h" -#include <algorithm> -#include <initializer_list> -#include <iterator> -#include <string.h> -#include <utility> - -// For ASAN builds, disable inline buffers completely as they cause various -// issues. 
-#ifdef ANNOTATE_CONTIGUOUS_CONTAINER -#define INLINE_CAPACITY 0 -#else -#define INLINE_CAPACITY inlineCapacity -#endif - -namespace WTF { - -#if defined(MEMORY_SANITIZER_INITIAL_SIZE) -static const size_t kInitialVectorSize = 1; -#else -#ifndef WTF_VECTOR_INITIAL_SIZE -#define WTF_VECTOR_INITIAL_SIZE 4 -#endif -static const size_t kInitialVectorSize = WTF_VECTOR_INITIAL_SIZE; -#endif - -template <typename T, size_t inlineBuffer, typename Allocator> -class Deque; - -// -// Vector Traits -// - -// Bunch of traits for Vector are defined here, with which you can customize -// Vector's behavior. In most cases the default traits are appropriate, so you -// usually don't have to specialize those traits by yourself. -// -// The behavior of the implementation below can be controlled by VectorTraits. -// If you want to change the behavior of your type, take a look at VectorTraits -// (defined in VectorTraits.h), too. - -template <bool needsDestruction, typename T> -struct VectorDestructor; - -template <typename T> -struct VectorDestructor<false, T> { - STATIC_ONLY(VectorDestructor); - static void destruct(T*, T*) {} -}; - -template <typename T> -struct VectorDestructor<true, T> { - STATIC_ONLY(VectorDestructor); - static void destruct(T* begin, T* end) { - for (T* cur = begin; cur != end; ++cur) - cur->~T(); - } -}; - -template <bool unusedSlotsMustBeZeroed, typename T> -struct VectorUnusedSlotClearer; - -template <typename T> -struct VectorUnusedSlotClearer<false, T> { - STATIC_ONLY(VectorUnusedSlotClearer); - static void clear(T*, T*) {} -#if DCHECK_IS_ON() - static void checkCleared(const T*, const T*) {} -#endif -}; - -template <typename T> -struct VectorUnusedSlotClearer<true, T> { - STATIC_ONLY(VectorUnusedSlotClearer); - static void clear(T* begin, T* end) { - memset(reinterpret_cast<void*>(begin), 0, sizeof(T) * (end - begin)); - } - -#if DCHECK_IS_ON() - static void checkCleared(const T* begin, const T* end) { - const unsigned char* unusedArea = - 
reinterpret_cast<const unsigned char*>(begin); - const unsigned char* endAddress = - reinterpret_cast<const unsigned char*>(end); - DCHECK_GE(endAddress, unusedArea); - for (int i = 0; i < endAddress - unusedArea; ++i) - DCHECK(!unusedArea[i]); - } -#endif -}; - -template <bool canInitializeWithMemset, typename T> -struct VectorInitializer; - -template <typename T> -struct VectorInitializer<false, T> { - STATIC_ONLY(VectorInitializer); - static void initialize(T* begin, T* end) { - for (T* cur = begin; cur != end; ++cur) - new (NotNull, cur) T; - } -}; - -template <typename T> -struct VectorInitializer<true, T> { - STATIC_ONLY(VectorInitializer); - static void initialize(T* begin, T* end) { - memset(begin, 0, - reinterpret_cast<char*>(end) - reinterpret_cast<char*>(begin)); - } -}; - -template <bool canMoveWithMemcpy, typename T> -struct VectorMover; - -template <typename T> -struct VectorMover<false, T> { - STATIC_ONLY(VectorMover); - static void move(T* src, T* srcEnd, T* dst) { - while (src != srcEnd) { - new (NotNull, dst) T(std::move(*src)); - src->~T(); - ++dst; - ++src; - } - } - static void moveOverlapping(T* src, T* srcEnd, T* dst) { - if (src > dst) { - move(src, srcEnd, dst); - } else { - T* dstEnd = dst + (srcEnd - src); - while (src != srcEnd) { - --srcEnd; - --dstEnd; - new (NotNull, dstEnd) T(std::move(*srcEnd)); - srcEnd->~T(); - } - } - } - static void swap(T* src, T* srcEnd, T* dst) { - std::swap_ranges(src, srcEnd, dst); - } -}; - -template <typename T> -struct VectorMover<true, T> { - STATIC_ONLY(VectorMover); - static void move(const T* src, const T* srcEnd, T* dst) { - if (LIKELY(dst && src)) - memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - - reinterpret_cast<const char*>(src)); - } - static void moveOverlapping(const T* src, const T* srcEnd, T* dst) { - if (LIKELY(dst && src)) - memmove(dst, src, reinterpret_cast<const char*>(srcEnd) - - reinterpret_cast<const char*>(src)); - } - static void swap(T* src, T* srcEnd, T* dst) { - 
std::swap_ranges(reinterpret_cast<char*>(src), - reinterpret_cast<char*>(srcEnd), - reinterpret_cast<char*>(dst)); - } -}; - -template <bool canCopyWithMemcpy, typename T> -struct VectorCopier; - -template <typename T> -struct VectorCopier<false, T> { - STATIC_ONLY(VectorCopier); - template <typename U> - static void uninitializedCopy(const U* src, const U* srcEnd, T* dst) { - while (src != srcEnd) { - new (NotNull, dst) T(*src); - ++dst; - ++src; - } - } -}; - -template <typename T> -struct VectorCopier<true, T> { - STATIC_ONLY(VectorCopier); - static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) { - if (LIKELY(dst && src)) - memcpy(dst, src, reinterpret_cast<const char*>(srcEnd) - - reinterpret_cast<const char*>(src)); - } - template <typename U> - static void uninitializedCopy(const U* src, const U* srcEnd, T* dst) { - VectorCopier<false, T>::uninitializedCopy(src, srcEnd, dst); - } -}; - -template <bool canFillWithMemset, typename T> -struct VectorFiller; - -template <typename T> -struct VectorFiller<false, T> { - STATIC_ONLY(VectorFiller); - static void uninitializedFill(T* dst, T* dstEnd, const T& val) { - while (dst != dstEnd) { - new (NotNull, dst) T(val); - ++dst; - } - } -}; - -template <typename T> -struct VectorFiller<true, T> { - STATIC_ONLY(VectorFiller); - static void uninitializedFill(T* dst, T* dstEnd, const T& val) { - static_assert(sizeof(T) == sizeof(char), "size of type should be one"); -#if COMPILER(GCC) && defined(_FORTIFY_SOURCE) - if (!__builtin_constant_p(dstEnd - dst) || (!(dstEnd - dst))) - memset(dst, val, dstEnd - dst); -#else - memset(dst, val, dstEnd - dst); -#endif - } -}; - -template <bool canCompareWithMemcmp, typename T> -struct VectorComparer; - -template <typename T> -struct VectorComparer<false, T> { - STATIC_ONLY(VectorComparer); - static bool compare(const T* a, const T* b, size_t size) { - DCHECK(a); - DCHECK(b); - return std::equal(a, a + size, b); - } -}; - -template <typename T> -struct 
VectorComparer<true, T> { - STATIC_ONLY(VectorComparer); - static bool compare(const T* a, const T* b, size_t size) { - DCHECK(a); - DCHECK(b); - return memcmp(a, b, sizeof(T) * size) == 0; - } -}; - -template <typename T> -struct VectorElementComparer { - STATIC_ONLY(VectorElementComparer); - template <typename U> - static bool compareElement(const T& left, const U& right) { - return left == right; - } -}; - -template <typename T> -struct VectorElementComparer<std::unique_ptr<T>> { - STATIC_ONLY(VectorElementComparer); - template <typename U> - static bool compareElement(const std::unique_ptr<T>& left, const U& right) { - return left.get() == right; - } -}; - -// A collection of all the traits used by Vector. This is basically an -// implementation detail of Vector, and you probably don't want to change this. -// If you want to customize Vector's behavior, you should specialize -// VectorTraits instead (defined in VectorTraits.h). -template <typename T> -struct VectorTypeOperations { - STATIC_ONLY(VectorTypeOperations); - static void destruct(T* begin, T* end) { - VectorDestructor<VectorTraits<T>::needsDestruction, T>::destruct(begin, - end); - } - - static void initialize(T* begin, T* end) { - VectorInitializer<VectorTraits<T>::canInitializeWithMemset, T>::initialize( - begin, end); - } - - static void move(T* src, T* srcEnd, T* dst) { - VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::move(src, srcEnd, dst); - } - - static void moveOverlapping(T* src, T* srcEnd, T* dst) { - VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::moveOverlapping( - src, srcEnd, dst); - } - - static void swap(T* src, T* srcEnd, T* dst) { - VectorMover<VectorTraits<T>::canMoveWithMemcpy, T>::swap(src, srcEnd, dst); - } - - static void uninitializedCopy(const T* src, const T* srcEnd, T* dst) { - VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy( - src, srcEnd, dst); - } - - static void uninitializedFill(T* dst, T* dstEnd, const T& val) { - 
VectorFiller<VectorTraits<T>::canFillWithMemset, T>::uninitializedFill( - dst, dstEnd, val); - } - - static bool compare(const T* a, const T* b, size_t size) { - return VectorComparer<VectorTraits<T>::canCompareWithMemcmp, T>::compare( - a, b, size); - } - - template <typename U> - static bool compareElement(const T& left, U&& right) { - return VectorElementComparer<T>::compareElement(left, - std::forward<U>(right)); - } -}; - -// -// VectorBuffer -// - -// VectorBuffer is an implementation detail of Vector and Deque. It manages -// Vector's underlying buffer, and does operations like allocation or -// expansion. -// -// Not meant for general consumption. - -template <typename T, bool hasInlineCapacity, typename Allocator> -class VectorBufferBase { - WTF_MAKE_NONCOPYABLE(VectorBufferBase); - DISALLOW_NEW(); - - public: - void allocateBuffer(size_t newCapacity) { - DCHECK(newCapacity); - DCHECK_LE(newCapacity, - Allocator::template maxElementCountInBackingStore<T>()); - size_t sizeToAllocate = allocationSize(newCapacity); - if (hasInlineCapacity) - m_buffer = - Allocator::template allocateInlineVectorBacking<T>(sizeToAllocate); - else - m_buffer = Allocator::template allocateVectorBacking<T>(sizeToAllocate); - m_capacity = sizeToAllocate / sizeof(T); - } - - void allocateExpandedBuffer(size_t newCapacity) { - DCHECK(newCapacity); - size_t sizeToAllocate = allocationSize(newCapacity); - if (hasInlineCapacity) - m_buffer = - Allocator::template allocateInlineVectorBacking<T>(sizeToAllocate); - else - m_buffer = - Allocator::template allocateExpandedVectorBacking<T>(sizeToAllocate); - m_capacity = sizeToAllocate / sizeof(T); - } - - size_t allocationSize(size_t capacity) const { - return Allocator::template quantizedSize<T>(capacity); - } - - T* buffer() { return m_buffer; } - const T* buffer() const { return m_buffer; } - size_t capacity() const { return m_capacity; } - - void clearUnusedSlots(T* from, T* to) { - // If the vector backing is garbage-collected and needs 
tracing or - // finalizing, we clear out the unused slots so that the visitor or the - // finalizer does not cause a problem when visiting the unused slots. - VectorUnusedSlotClearer< - Allocator::isGarbageCollected && - (VectorTraits<T>::needsDestruction || - IsTraceableInCollectionTrait<VectorTraits<T>>::value), - T>::clear(from, to); - } - - void checkUnusedSlots(const T* from, const T* to) { -#if DCHECK_IS_ON() && !defined(ANNOTATE_CONTIGUOUS_CONTAINER) - VectorUnusedSlotClearer< - Allocator::isGarbageCollected && - (VectorTraits<T>::needsDestruction || - IsTraceableInCollectionTrait<VectorTraits<T>>::value), - T>::checkCleared(from, to); -#endif - } - - // |end| is exclusive, a la STL. - struct OffsetRange final { - OffsetRange() : begin(0), end(0) {} - explicit OffsetRange(size_t begin, size_t end) : begin(begin), end(end) { - DCHECK_LE(begin, end); - } - bool empty() const { return begin == end; } - size_t begin; - size_t end; - }; - - protected: - VectorBufferBase() : m_buffer(nullptr), m_capacity(0) {} - - VectorBufferBase(T* buffer, size_t capacity) - : m_buffer(buffer), m_capacity(capacity) {} - - T* m_buffer; - unsigned m_capacity; - unsigned m_size; -}; - -template <typename T, - size_t inlineCapacity, - typename Allocator = PartitionAllocator> -class VectorBuffer; - -template <typename T, typename Allocator> -class VectorBuffer<T, 0, Allocator> - : protected VectorBufferBase<T, false, Allocator> { - private: - using Base = VectorBufferBase<T, false, Allocator>; - - public: - using OffsetRange = typename Base::OffsetRange; - - VectorBuffer() {} - - explicit VectorBuffer(size_t capacity) { - // Calling malloc(0) might take a lock and may actually do an allocation - // on some systems. 
- if (capacity) - allocateBuffer(capacity); - } - - void destruct() { - deallocateBuffer(m_buffer); - m_buffer = nullptr; - } - - void deallocateBuffer(T* bufferToDeallocate) { - Allocator::freeVectorBacking(bufferToDeallocate); - } - - bool expandBuffer(size_t newCapacity) { - size_t sizeToAllocate = allocationSize(newCapacity); - if (Allocator::expandVectorBacking(m_buffer, sizeToAllocate)) { - m_capacity = sizeToAllocate / sizeof(T); - return true; - } - return false; - } - - inline bool shrinkBuffer(size_t newCapacity) { - DCHECK_LT(newCapacity, capacity()); - size_t sizeToAllocate = allocationSize(newCapacity); - if (Allocator::shrinkVectorBacking(m_buffer, allocationSize(capacity()), - sizeToAllocate)) { - m_capacity = sizeToAllocate / sizeof(T); - return true; - } - return false; - } - - void resetBufferPointer() { - m_buffer = nullptr; - m_capacity = 0; - } - - // See the other specialization for the meaning of |thisHole| and |otherHole|. - // They are irrelevant in this case. - void swapVectorBuffer(VectorBuffer<T, 0, Allocator>& other, - OffsetRange thisHole, - OffsetRange otherHole) { - static_assert(VectorTraits<T>::canSwapUsingCopyOrMove, - "Cannot swap HeapVectors of TraceWrapperMembers."); - - std::swap(m_buffer, other.m_buffer); - std::swap(m_capacity, other.m_capacity); - std::swap(m_size, other.m_size); - } - - using Base::allocateBuffer; - using Base::allocationSize; - - using Base::buffer; - using Base::capacity; - - using Base::clearUnusedSlots; - using Base::checkUnusedSlots; - - bool hasOutOfLineBuffer() const { - // When inlineCapacity is 0 we have an out of line buffer if we have a - // buffer. 
- return buffer(); - } - - T** bufferSlot() { return &m_buffer; } - - protected: - using Base::m_size; - - private: - using Base::m_buffer; - using Base::m_capacity; -}; - -template <typename T, size_t inlineCapacity, typename Allocator> -class VectorBuffer : protected VectorBufferBase<T, true, Allocator> { - WTF_MAKE_NONCOPYABLE(VectorBuffer); - - private: - using Base = VectorBufferBase<T, true, Allocator>; - - public: - using OffsetRange = typename Base::OffsetRange; - - VectorBuffer() : Base(inlineBuffer(), inlineCapacity) {} - - explicit VectorBuffer(size_t capacity) - : Base(inlineBuffer(), inlineCapacity) { - if (capacity > inlineCapacity) - Base::allocateBuffer(capacity); - } - - void destruct() { - deallocateBuffer(m_buffer); - m_buffer = nullptr; - } - - NEVER_INLINE void reallyDeallocateBuffer(T* bufferToDeallocate) { - Allocator::freeInlineVectorBacking(bufferToDeallocate); - } - - void deallocateBuffer(T* bufferToDeallocate) { - if (UNLIKELY(bufferToDeallocate != inlineBuffer())) - reallyDeallocateBuffer(bufferToDeallocate); - } - - bool expandBuffer(size_t newCapacity) { - DCHECK_GT(newCapacity, inlineCapacity); - if (m_buffer == inlineBuffer()) - return false; - - size_t sizeToAllocate = allocationSize(newCapacity); - if (Allocator::expandInlineVectorBacking(m_buffer, sizeToAllocate)) { - m_capacity = sizeToAllocate / sizeof(T); - return true; - } - return false; - } - - inline bool shrinkBuffer(size_t newCapacity) { - DCHECK_LT(newCapacity, capacity()); - if (newCapacity <= inlineCapacity) { - // We need to switch to inlineBuffer. Vector::shrinkCapacity will - // handle it. 
- return false; - } - DCHECK_NE(m_buffer, inlineBuffer()); - size_t newSize = allocationSize(newCapacity); - if (!Allocator::shrinkInlineVectorBacking( - m_buffer, allocationSize(capacity()), newSize)) - return false; - m_capacity = newSize / sizeof(T); - return true; - } - - void resetBufferPointer() { - m_buffer = inlineBuffer(); - m_capacity = inlineCapacity; - } - - void allocateBuffer(size_t newCapacity) { - // FIXME: This should DCHECK(!m_buffer) to catch misuse/leaks. - if (newCapacity > inlineCapacity) - Base::allocateBuffer(newCapacity); - else - resetBufferPointer(); - } - - void allocateExpandedBuffer(size_t newCapacity) { - if (newCapacity > inlineCapacity) - Base::allocateExpandedBuffer(newCapacity); - else - resetBufferPointer(); - } - - size_t allocationSize(size_t capacity) const { - if (capacity <= inlineCapacity) - return m_inlineBufferSize; - return Base::allocationSize(capacity); - } - - // Swap two vector buffers, both of which have the same non-zero inline - // capacity. - // - // If the data is in an out-of-line buffer, we can just pass the pointers - // across the two buffers. If the data is in an inline buffer, we need to - // either swap or move each element, depending on whether each slot is - // occupied or not. - // - // Further complication comes from the fact that VectorBuffer is also used as - // the backing store of a Deque. Deque allocates the objects like a ring - // buffer, so there may be a "hole" (unallocated region) in the middle of the - // buffer. This function assumes elements in a range [m_buffer, m_buffer + - // m_size) are all allocated except for elements within |thisHole|. The same - // applies for |other.m_buffer| and |otherHole|. 
- void swapVectorBuffer(VectorBuffer<T, inlineCapacity, Allocator>& other, - OffsetRange thisHole, - OffsetRange otherHole) { - using TypeOperations = VectorTypeOperations<T>; - - static_assert(VectorTraits<T>::canSwapUsingCopyOrMove, - "Cannot swap HeapVectors of TraceWrapperMembers."); - - if (buffer() != inlineBuffer() && other.buffer() != other.inlineBuffer()) { - // The easiest case: both buffers are non-inline. We just need to swap the - // pointers. - std::swap(m_buffer, other.m_buffer); - std::swap(m_capacity, other.m_capacity); - std::swap(m_size, other.m_size); - return; - } - - Allocator::enterGCForbiddenScope(); - - // Otherwise, we at least need to move some elements from one inline buffer - // to another. - // - // Terminology: "source" is a place from which elements are copied, and - // "destination" is a place to which elements are copied. thisSource or - // otherSource can be empty (represented by nullptr) when this range or - // other range is in an out-of-line buffer. - // - // We first record which range needs to get moved and where elements in such - // a range will go. Elements in an inline buffer will go to the other - // buffer's inline buffer. Elements in an out-of-line buffer won't move, - // because we can just swap pointers of out-of-line buffers. - T* thisSourceBegin = nullptr; - size_t thisSourceSize = 0; - T* thisDestinationBegin = nullptr; - if (buffer() == inlineBuffer()) { - thisSourceBegin = buffer(); - thisSourceSize = m_size; - thisDestinationBegin = other.inlineBuffer(); - if (!thisHole.empty()) { // Sanity check. - DCHECK_LT(thisHole.begin, thisHole.end); - DCHECK_LE(thisHole.end, thisSourceSize); - } - } else { - // We don't need the hole information for an out-of-line buffer. 
- thisHole.begin = thisHole.end = 0; - } - T* otherSourceBegin = nullptr; - size_t otherSourceSize = 0; - T* otherDestinationBegin = nullptr; - if (other.buffer() == other.inlineBuffer()) { - otherSourceBegin = other.buffer(); - otherSourceSize = other.m_size; - otherDestinationBegin = inlineBuffer(); - if (!otherHole.empty()) { - DCHECK_LT(otherHole.begin, otherHole.end); - DCHECK_LE(otherHole.end, otherSourceSize); - } - } else { - otherHole.begin = otherHole.end = 0; - } - - // Next, we mutate members and do other bookkeeping. We do pointer swapping - // (for out-of-line buffers) here if we can. From now on, don't assume - // buffer() or capacity() maintains their original values. - std::swap(m_capacity, other.m_capacity); - if (thisSourceBegin && - !otherSourceBegin) { // Our buffer is inline, theirs is not. - DCHECK_EQ(buffer(), inlineBuffer()); - DCHECK_NE(other.buffer(), other.inlineBuffer()); - ANNOTATE_DELETE_BUFFER(m_buffer, inlineCapacity, m_size); - m_buffer = other.buffer(); - other.m_buffer = other.inlineBuffer(); - std::swap(m_size, other.m_size); - ANNOTATE_NEW_BUFFER(other.m_buffer, inlineCapacity, other.m_size); - } else if (!thisSourceBegin && - otherSourceBegin) { // Their buffer is inline, ours is not. - DCHECK_NE(buffer(), inlineBuffer()); - DCHECK_EQ(other.buffer(), other.inlineBuffer()); - ANNOTATE_DELETE_BUFFER(other.m_buffer, inlineCapacity, other.m_size); - other.m_buffer = buffer(); - m_buffer = inlineBuffer(); - std::swap(m_size, other.m_size); - ANNOTATE_NEW_BUFFER(m_buffer, inlineCapacity, m_size); - } else { // Both buffers are inline. - DCHECK(thisSourceBegin); - DCHECK(otherSourceBegin); - DCHECK_EQ(buffer(), inlineBuffer()); - DCHECK_EQ(other.buffer(), other.inlineBuffer()); - ANNOTATE_CHANGE_SIZE(m_buffer, inlineCapacity, m_size, other.m_size); - ANNOTATE_CHANGE_SIZE(other.m_buffer, inlineCapacity, other.m_size, - m_size); - std::swap(m_size, other.m_size); - } - - // We are ready to move elements. 
We determine an action for each "section", - // which is a contiguous range such that all elements in the range are - // treated similarly. - size_t sectionBegin = 0; - while (sectionBegin < inlineCapacity) { - // To determine the end of this section, we list up all the boundaries - // where the "occupiedness" may change. - size_t sectionEnd = inlineCapacity; - if (thisSourceBegin && sectionBegin < thisSourceSize) - sectionEnd = std::min(sectionEnd, thisSourceSize); - if (!thisHole.empty() && sectionBegin < thisHole.begin) - sectionEnd = std::min(sectionEnd, thisHole.begin); - if (!thisHole.empty() && sectionBegin < thisHole.end) - sectionEnd = std::min(sectionEnd, thisHole.end); - if (otherSourceBegin && sectionBegin < otherSourceSize) - sectionEnd = std::min(sectionEnd, otherSourceSize); - if (!otherHole.empty() && sectionBegin < otherHole.begin) - sectionEnd = std::min(sectionEnd, otherHole.begin); - if (!otherHole.empty() && sectionBegin < otherHole.end) - sectionEnd = std::min(sectionEnd, otherHole.end); - - DCHECK_LT(sectionBegin, sectionEnd); - - // Is the |sectionBegin|-th element of |thisSource| occupied? - bool thisOccupied = false; - if (thisSourceBegin && sectionBegin < thisSourceSize) { - // Yes, it's occupied, unless the position is in a hole. - if (thisHole.empty() || sectionBegin < thisHole.begin || - sectionBegin >= thisHole.end) - thisOccupied = true; - } - bool otherOccupied = false; - if (otherSourceBegin && sectionBegin < otherSourceSize) { - if (otherHole.empty() || sectionBegin < otherHole.begin || - sectionBegin >= otherHole.end) - otherOccupied = true; - } - - if (thisOccupied && otherOccupied) { - // Both occupied; swap them. In this case, one's destination must be the - // other's source (i.e. both ranges are in inline buffers). 
- DCHECK_EQ(thisDestinationBegin, otherSourceBegin); - DCHECK_EQ(otherDestinationBegin, thisSourceBegin); - TypeOperations::swap(thisSourceBegin + sectionBegin, - thisSourceBegin + sectionEnd, - otherSourceBegin + sectionBegin); - } else if (thisOccupied) { - // Move from ours to theirs. - TypeOperations::move(thisSourceBegin + sectionBegin, - thisSourceBegin + sectionEnd, - thisDestinationBegin + sectionBegin); - Base::clearUnusedSlots(thisSourceBegin + sectionBegin, - thisSourceBegin + sectionEnd); - } else if (otherOccupied) { - // Move from theirs to ours. - TypeOperations::move(otherSourceBegin + sectionBegin, - otherSourceBegin + sectionEnd, - otherDestinationBegin + sectionBegin); - Base::clearUnusedSlots(otherSourceBegin + sectionBegin, - otherSourceBegin + sectionEnd); - } else { - // Both empty; nothing to do. - } - - sectionBegin = sectionEnd; - } - - Allocator::leaveGCForbiddenScope(); - } - - using Base::buffer; - using Base::capacity; - - bool hasOutOfLineBuffer() const { - return buffer() && buffer() != inlineBuffer(); - } - - T** bufferSlot() { return &m_buffer; } - - protected: - using Base::m_size; - - private: - using Base::m_buffer; - using Base::m_capacity; - - static const size_t m_inlineBufferSize = inlineCapacity * sizeof(T); - T* inlineBuffer() { return reinterpret_cast_ptr<T*>(m_inlineBuffer.buffer); } - const T* inlineBuffer() const { - return reinterpret_cast_ptr<const T*>(m_inlineBuffer.buffer); - } - - AlignedBuffer<m_inlineBufferSize, WTF_ALIGN_OF(T)> m_inlineBuffer; - template <typename U, size_t inlineBuffer, typename V> - friend class Deque; -}; - -// -// Vector -// - -// Vector is a container that works just like std::vector. WTF's Vector has -// several extra functionalities: inline buffer, behavior customization via -// traits, and Oilpan support. Those are explained in the sections below. -// -// Vector is the most basic container, which stores its element in a contiguous -// buffer. 
The buffer is expanded automatically when necessary. The elements -// are automatically moved to the new buffer. This event is called a -// reallocation. A reallocation takes O(N)-time (N = number of elements), but -// its occurrences are rare, so its time cost should not be significant, -// compared to the time cost of other operations to the vector. -// -// Time complexity of key operations is as follows: -// -// * Indexed access -- O(1) -// * Insertion or removal of an element at the end -- amortized O(1) -// * Other insertion or removal -- O(N) -// * Swapping with another vector -- O(1) -// -// 1. Iterator invalidation semantics -// -// Vector provides STL-compatible iterators and reverse iterators. Iterators -// are _invalidated_ on certain occasions. Reading an invalidated iterator -// causes undefined behavior. -// -// Iterators are invalidated on the following situations: -// -// * When a reallocation happens on a vector, all the iterators for that -// vector will be invalidated. -// * Some member functions invalidate part of the existing iterators for -// the vector; see comments on the individual functions. -// * [Oilpan only] Heap compaction invalidates all the iterators for any -// HeapVectors. This means you can only store an iterator on stack, as -// a local variable. -// -// In this context, pointers or references to an element of a Vector are -// essentially equivalent to iterators, in that they also become invalid -// whenever corresponding iterators are invalidated. -// -// 2. Inline buffer -// -// Vectors may have an _inline buffer_. An inline buffer is a storage area -// that is contained in the vector itself, along with other metadata like -// m_size. It is used as a storage space when the vector's elements fit in -// that space. If the inline buffer becomes full and further space is -// necessary, an out-of-line buffer is allocated in the heap, and it will -// take over the role of the inline buffer. 
-// -// The existence of an inline buffer is indicated by non-zero |inlineCapacity| -// template argument. The value represents the number of elements that can be -// stored in the inline buffer. Zero |inlineCapacity| means the vector has no -// inline buffer. -// -// An inline buffer increases the size of the Vector instances, and, in trade -// for that, it gives you several performance benefits, as long as the number -// of elements do not exceed |inlineCapacity|: -// -// * No heap allocation will be made. -// * Memory locality will improve. -// -// Generally, having an inline buffer is useful for vectors that (1) are -// frequently accessed or modified, and (2) contain only a few elements at -// most. -// -// 3. Behavior customization -// -// You usually do not need to customize Vector's behavior, since the default -// behavior is appropriate for normal usage. The behavior is controlled by -// VectorTypeOperations traits template above. Read VectorTypeOperations -// and VectorTraits if you want to change the behavior for your types (i.e. -// if you really want faster vector operations). -// -// The default traits basically do the following: -// -// * Skip constructor call and fill zeros with memset for simple types; -// * Skip destructor call for simple types; -// * Copy or move by memcpy for simple types; and -// * Customize the comparisons for smart pointer types, so you can look -// up a std::unique_ptr<T> element with a raw pointer, for instance. -// -// 4. Oilpan -// -// If you want to store garbage collected objects in Vector, (1) use HeapVector -// (defined in HeapAllocator.h) instead of Vector, and (2) make sure your -// garbage-collected type is wrapped with Member, like: -// -// HeapVector<Member<Node>> nodes; -// -// Unlike normal garbage-collected objects, a HeapVector object itself is -// NOT a garbage-collected object, but its backing buffer is allocated in -// Oilpan heap, and it may still carry garbage-collected objects. 
-// -// Even though a HeapVector object is not garbage-collected, you still need -// to trace it, if you stored it in your class. Also, you can allocate it -// as a local variable. This is useful when you want to build a vector locally -// and put it in an on-heap vector with swap(). -// -// Also, heap compaction, which may happen at any time when Blink code is not -// running (i.e. Blink code does not appear in the call stack), may invalidate -// existing iterators for any HeapVectors. So, essentially, you should always -// allocate an iterator on stack (as a local variable), and you should not -// store iterators in another heap object. - -template <typename T, - size_t inlineCapacity = 0, - typename Allocator = PartitionAllocator> -class Vector - : private VectorBuffer<T, INLINE_CAPACITY, Allocator>, - // Heap-allocated vectors with no inlineCapacity never need a destructor. - public ConditionalDestructor<Vector<T, INLINE_CAPACITY, Allocator>, - (INLINE_CAPACITY == 0) && - Allocator::isGarbageCollected> { - USE_ALLOCATOR(Vector, Allocator); - using Base = VectorBuffer<T, INLINE_CAPACITY, Allocator>; - using TypeOperations = VectorTypeOperations<T>; - using OffsetRange = typename Base::OffsetRange; - - public: - using ValueType = T; - using value_type = T; - - using iterator = T*; - using const_iterator = const T*; - using reverse_iterator = std::reverse_iterator<iterator>; - using const_reverse_iterator = std::reverse_iterator<const_iterator>; - - // Create an empty vector. - inline Vector(); - // Create a vector containing the specified number of default-initialized - // elements. - inline explicit Vector(size_t); - // Create a vector containing the specified number of elements, each of which - // is copy initialized from the specified value. - inline Vector(size_t, const T&); - - // Copying. 
- Vector(const Vector&); - template <size_t otherCapacity> - explicit Vector(const Vector<T, otherCapacity, Allocator>&); - - Vector& operator=(const Vector&); - template <size_t otherCapacity> - Vector& operator=(const Vector<T, otherCapacity, Allocator>&); - - // Moving. - Vector(Vector&&); - Vector& operator=(Vector&&); - - // Construct with an initializer list. You can do e.g. - // Vector<int> v({1, 2, 3}); - // or - // v = {4, 5, 6}; - Vector(std::initializer_list<T> elements); - Vector& operator=(std::initializer_list<T> elements); - - // Basic inquiry about the vector's state. - // - // capacity() is the maximum number of elements that the Vector can hold - // without a reallocation. It can be zero. - size_t size() const { return m_size; } - size_t capacity() const { return Base::capacity(); } - bool isEmpty() const { return !size(); } - - // at() and operator[]: Obtain the reference of the element that is located - // at the given index. The reference may be invalidated on a reallocation. - // - // at() can be used in cases like: - // pointerToVector->at(1); - // instead of: - // (*pointerToVector)[1]; - T& at(size_t i) { - RELEASE_ASSERT(i < size()); - return Base::buffer()[i]; - } - const T& at(size_t i) const { - RELEASE_ASSERT(i < size()); - return Base::buffer()[i]; - } - - T& operator[](size_t i) { return at(i); } - const T& operator[](size_t i) const { return at(i); } - - // Return a pointer to the front of the backing buffer. Those pointers get - // invalidated on a reallocation. - T* data() { return Base::buffer(); } - const T* data() const { return Base::buffer(); } - - // Iterators and reverse iterators. They are invalidated on a reallocation. 
- iterator begin() { return data(); } - iterator end() { return begin() + m_size; } - const_iterator begin() const { return data(); } - const_iterator end() const { return begin() + m_size; } - - reverse_iterator rbegin() { return reverse_iterator(end()); } - reverse_iterator rend() { return reverse_iterator(begin()); } - const_reverse_iterator rbegin() const { - return const_reverse_iterator(end()); - } - const_reverse_iterator rend() const { - return const_reverse_iterator(begin()); - } - - // Quick access to the first and the last element. It is invalid to call - // these functions when the vector is empty. - T& front() { return at(0); } - const T& front() const { return at(0); } - T& back() { return at(size() - 1); } - const T& back() const { return at(size() - 1); } - - // Searching. - // - // Comparisons are done in terms of compareElement(), which is usually - // operator==(). find() and reverseFind() returns an index of the element - // that is found first. If no match is found, kNotFound will be returned. - template <typename U> - bool contains(const U&) const; - template <typename U> - size_t find(const U&) const; - template <typename U> - size_t reverseFind(const U&) const; - - // Resize the vector to the specified size. - // - // These three functions are essentially similar. They differ in that - // (1) shrink() has a DCHECK to make sure the specified size is not more than - // size(), and (2) grow() has a DCHECK to make sure the specified size is - // not less than size(). - // - // When a vector shrinks, the extra elements in the back will be destructed. - // All the iterators pointing to a to-be-destructed element will be - // invalidated. - // - // When a vector grows, new elements will be added in the back, and they - // will be default-initialized. A reallocation may happen in this case. - void shrink(size_t); - void grow(size_t); - void resize(size_t); - - // Increase the capacity of the vector to at least |newCapacity|. 
The - // elements in the vector are not affected. This function does not shrink - // the size of the backing buffer, even if |newCapacity| is small. This - // function may cause a reallocation. - void reserveCapacity(size_t newCapacity); - - // This is similar to reserveCapacity() but must be called immediately after - // the vector is default-constructed. - void reserveInitialCapacity(size_t initialCapacity); - - // Shrink the backing buffer so it can contain exactly |size()| elements. - // This function may cause a reallocation. - void shrinkToFit() { shrinkCapacity(size()); } - - // Shrink the backing buffer if at least 50% of the vector's capacity is - // unused. If it shrinks, the new buffer contains roughly 25% of unused - // space. This function may cause a reallocation. - void shrinkToReasonableCapacity() { - if (size() * 2 < capacity()) - shrinkCapacity(size() + size() / 4 + 1); - } - - // Remove all the elements. This function actually releases the backing - // buffer, thus any iterators will get invalidated (including begin()). - void clear() { shrinkCapacity(0); } - - // Insertion to the back. All of these functions except uncheckedAppend() may - // cause a reallocation. - // - // push_back(value) - // Insert a single element to the back. - // emplace_back(args...) - // Insert a single element constructed as T(args...) to the back. The - // element is constructed directly on the backing buffer with placement - // new. - // append(buffer, size) - // appendVector(vector) - // appendRange(begin, end) - // Insert multiple elements represented by (1) |buffer| and |size| - // (for append), (2) |vector| (for appendVector), or (3) a pair of - // iterators (for appendRange) to the back. The elements will be copied. - // uncheckedAppend(value) - // Insert a single element like push_back(), but this function assumes - // the vector has enough capacity such that it can store the new element - // without a reallocation. 
Using this function could improve the - // performance when you append many elements repeatedly. - template <typename U> - void push_back(U&&); - template <typename... Args> - T& emplace_back(Args&&...); - ALWAYS_INLINE T& emplace_back() { - grow(m_size + 1); - return back(); - } - template <typename U> - void append(const U*, size_t); - template <typename U, size_t otherCapacity, typename V> - void appendVector(const Vector<U, otherCapacity, V>&); - template <typename Iterator> - void appendRange(Iterator begin, Iterator end); - template <typename U> - void uncheckedAppend(U&&); - - // Insertion to an arbitrary position. All of these functions will take - // O(size())-time. All of the elements after |position| will be moved to - // the new locations. |position| must be no more than size(). All of these - // functions may cause a reallocation. In any case, all the iterators - // pointing to an element after |position| will be invalidated. - // - // insert(position, value) - // Insert a single element at |position|. - // insert(position, buffer, size) - // insert(position, vector) - // Insert multiple elements represented by either |buffer| and |size| - // or |vector| at |position|. The elements will be copied. - // - // TODO(yutak): Why not insertVector()? - template <typename U> - void insert(size_t position, U&&); - template <typename U> - void insert(size_t position, const U*, size_t); - template <typename U, size_t otherCapacity, typename OtherAllocator> - void insert(size_t position, const Vector<U, otherCapacity, OtherAllocator>&); - - // Insertion to the front. All of these functions will take O(size())-time. - // All of the elements in the vector will be moved to the new locations. - // All of these functions may cause a reallocation. In any case, all the - // iterators pointing to any element in the vector will be invalidated. - // - // push_front(value) - // Insert a single element to the front. 
- // push_front(buffer, size) - // prependVector(vector) - // Insert multiple elements represented by either |buffer| and |size| or - // |vector| to the front. The elements will be copied. - template <typename U> - void push_front(U&&); - template <typename U> - void push_front(const U*, size_t); - template <typename U, size_t otherCapacity, typename OtherAllocator> - void prependVector(const Vector<U, otherCapacity, OtherAllocator>&); - - // Remove an element or elements at the specified position. These functions - // take O(size())-time. All of the elements after the removed ones will be - // moved to the new locations. All the iterators pointing to any element - // after |position| will be invalidated. - void remove(size_t position); - void remove(size_t position, size_t length); - - // Remove the last element. Unlike remove(), (1) this function is fast, and - // (2) only iterators pointing to the last element will be invalidated. Other - // references will remain valid. - void pop_back() { - DCHECK(!isEmpty()); - shrink(size() - 1); - } - - // Filling the vector with the same value. If the vector has shrinked or - // growed as a result of this call, those events may invalidate some - // iterators. See comments for shrink() and grow(). - // - // fill(value, size) will resize the Vector to |size|, and then copy-assign - // or copy-initialize all the elements. - // - // fill(value) is a synonym for fill(value, size()). - void fill(const T&, size_t); - void fill(const T& val) { fill(val, size()); } - - // Swap two vectors quickly. - void swap(Vector& other) { - Base::swapVectorBuffer(other, OffsetRange(), OffsetRange()); - } - - // Reverse the contents. - void reverse(); - - // Maximum element count supported; allocating a vector - // buffer with a larger count will fail. - static size_t maxCapacity() { - return Allocator::template maxElementCountInBackingStore<T>(); - } - - // Off-GC-heap vectors: Destructor should be called. 
- // On-GC-heap vectors: Destructor should be called for inline buffers (if - // any) but destructor shouldn't be called for vector backing since it is - // managed by the traced GC heap. - void finalize() { - if (!INLINE_CAPACITY) { - if (LIKELY(!Base::buffer())) - return; - } - ANNOTATE_DELETE_BUFFER(begin(), capacity(), m_size); - if (LIKELY(m_size) && - !(Allocator::isGarbageCollected && this->hasOutOfLineBuffer())) { - TypeOperations::destruct(begin(), end()); - m_size = 0; // Partial protection against use-after-free. - } - - Base::destruct(); - } - - void finalizeGarbageCollectedObject() { finalize(); } - - template <typename VisitorDispatcher> - void trace(VisitorDispatcher); - - class GCForbiddenScope { - STACK_ALLOCATED(); - - public: - GCForbiddenScope() { Allocator::enterGCForbiddenScope(); } - ~GCForbiddenScope() { Allocator::leaveGCForbiddenScope(); } - }; - - protected: - using Base::checkUnusedSlots; - using Base::clearUnusedSlots; - - private: - void expandCapacity(size_t newMinCapacity); - T* expandCapacity(size_t newMinCapacity, T*); - T* expandCapacity(size_t newMinCapacity, const T* data) { - return expandCapacity(newMinCapacity, const_cast<T*>(data)); - } - - template <typename U> - U* expandCapacity(size_t newMinCapacity, U*); - void shrinkCapacity(size_t newCapacity); - template <typename U> - void appendSlowCase(U&&); - - using Base::m_size; - using Base::buffer; - using Base::swapVectorBuffer; - using Base::allocateBuffer; - using Base::allocationSize; -}; - -// -// Vector out-of-line implementation -// - -template <typename T, size_t inlineCapacity, typename Allocator> -inline Vector<T, inlineCapacity, Allocator>::Vector() { - static_assert(!std::is_polymorphic<T>::value || - !VectorTraits<T>::canInitializeWithMemset, - "Cannot initialize with memset if there is a vtable"); - static_assert(Allocator::isGarbageCollected || - !AllowsOnlyPlacementNew<T>::value || !IsTraceable<T>::value, - "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects 
that " - "have trace methods into an off-heap Vector"); - static_assert(Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<T>::value, - "Cannot put raw pointers to garbage-collected classes into " - "an off-heap Vector. Use HeapVector<Member<T>> instead."); - - ANNOTATE_NEW_BUFFER(begin(), capacity(), 0); - m_size = 0; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline Vector<T, inlineCapacity, Allocator>::Vector(size_t size) : Base(size) { - static_assert(!std::is_polymorphic<T>::value || - !VectorTraits<T>::canInitializeWithMemset, - "Cannot initialize with memset if there is a vtable"); - static_assert(Allocator::isGarbageCollected || - !AllowsOnlyPlacementNew<T>::value || !IsTraceable<T>::value, - "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that " - "have trace methods into an off-heap Vector"); - static_assert(Allocator::isGarbageCollected || - !IsPointerToGarbageCollectedType<T>::value, - "Cannot put raw pointers to garbage-collected classes into " - "an off-heap Vector. Use HeapVector<Member<T>> instead."); - - ANNOTATE_NEW_BUFFER(begin(), capacity(), size); - m_size = size; - TypeOperations::initialize(begin(), end()); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline Vector<T, inlineCapacity, Allocator>::Vector(size_t size, const T& val) - : Base(size) { - // TODO(yutak): Introduce these assertions. Some use sites call this function - // in the context where T is an incomplete type. 
- // - // static_assert(!std::is_polymorphic<T>::value || - // !VectorTraits<T>::canInitializeWithMemset, - // "Cannot initialize with memset if there is a vtable"); - // static_assert(Allocator::isGarbageCollected || - // !AllowsOnlyPlacementNew<T>::value || - // !IsTraceable<T>::value, - // "Cannot put DISALLOW_NEW_EXCEPT_PLACEMENT_NEW objects that " - // "have trace methods into an off-heap Vector"); - // static_assert(Allocator::isGarbageCollected || - // !IsPointerToGarbageCollectedType<T>::value, - // "Cannot put raw pointers to garbage-collected classes into " - // "an off-heap Vector. Use HeapVector<Member<T>> instead."); - - ANNOTATE_NEW_BUFFER(begin(), capacity(), size); - m_size = size; - TypeOperations::uninitializedFill(begin(), end(), val); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -Vector<T, inlineCapacity, Allocator>::Vector(const Vector& other) - : Base(other.capacity()) { - ANNOTATE_NEW_BUFFER(begin(), capacity(), other.size()); - m_size = other.size(); - TypeOperations::uninitializedCopy(other.begin(), other.end(), begin()); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <size_t otherCapacity> -Vector<T, inlineCapacity, Allocator>::Vector( - const Vector<T, otherCapacity, Allocator>& other) - : Base(other.capacity()) { - ANNOTATE_NEW_BUFFER(begin(), capacity(), other.size()); - m_size = other.size(); - TypeOperations::uninitializedCopy(other.begin(), other.end(), begin()); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -Vector<T, inlineCapacity, Allocator>& Vector<T, inlineCapacity, Allocator>:: -operator=(const Vector<T, inlineCapacity, Allocator>& other) { - if (UNLIKELY(&other == this)) - return *this; - - if (size() > other.size()) { - shrink(other.size()); - } else if (other.size() > capacity()) { - clear(); - reserveCapacity(other.size()); - DCHECK(begin()); - } - - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, other.size()); - 
std::copy(other.begin(), other.begin() + size(), begin()); - TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end()); - m_size = other.size(); - - return *this; -} - -inline bool typelessPointersAreEqual(const void* a, const void* b) { - return a == b; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <size_t otherCapacity> -Vector<T, inlineCapacity, Allocator>& Vector<T, inlineCapacity, Allocator>:: -operator=(const Vector<T, otherCapacity, Allocator>& other) { - // If the inline capacities match, we should call the more specific - // template. If the inline capacities don't match, the two objects - // shouldn't be allocated the same address. - DCHECK(!typelessPointersAreEqual(&other, this)); - - if (size() > other.size()) { - shrink(other.size()); - } else if (other.size() > capacity()) { - clear(); - reserveCapacity(other.size()); - DCHECK(begin()); - } - - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, other.size()); - std::copy(other.begin(), other.begin() + size(), begin()); - TypeOperations::uninitializedCopy(other.begin() + size(), other.end(), end()); - m_size = other.size(); - - return *this; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -Vector<T, inlineCapacity, Allocator>::Vector( - Vector<T, inlineCapacity, Allocator>&& other) { - m_size = 0; - // It's a little weird to implement a move constructor using swap but this - // way we don't have to add a move constructor to VectorBuffer. 
- swap(other); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -Vector<T, inlineCapacity, Allocator>& Vector<T, inlineCapacity, Allocator>:: -operator=(Vector<T, inlineCapacity, Allocator>&& other) { - swap(other); - return *this; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -Vector<T, inlineCapacity, Allocator>::Vector(std::initializer_list<T> elements) - : Base(elements.size()) { - ANNOTATE_NEW_BUFFER(begin(), capacity(), elements.size()); - m_size = elements.size(); - TypeOperations::uninitializedCopy(elements.begin(), elements.end(), begin()); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -Vector<T, inlineCapacity, Allocator>& Vector<T, inlineCapacity, Allocator>:: -operator=(std::initializer_list<T> elements) { - if (size() > elements.size()) { - shrink(elements.size()); - } else if (elements.size() > capacity()) { - clear(); - reserveCapacity(elements.size()); - DCHECK(begin()); - } - - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, elements.size()); - std::copy(elements.begin(), elements.begin() + m_size, begin()); - TypeOperations::uninitializedCopy(elements.begin() + m_size, elements.end(), - end()); - m_size = elements.size(); - - return *this; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -bool Vector<T, inlineCapacity, Allocator>::contains(const U& value) const { - return find(value) != kNotFound; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -size_t Vector<T, inlineCapacity, Allocator>::find(const U& value) const { - const T* b = begin(); - const T* e = end(); - for (const T* iter = b; iter < e; ++iter) { - if (TypeOperations::compareElement(*iter, value)) - return iter - b; - } - return kNotFound; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -size_t Vector<T, inlineCapacity, Allocator>::reverseFind(const U& value) const { - const T* 
b = begin(); - const T* iter = end(); - while (iter > b) { - --iter; - if (TypeOperations::compareElement(*iter, value)) - return iter - b; - } - return kNotFound; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -void Vector<T, inlineCapacity, Allocator>::fill(const T& val, size_t newSize) { - if (size() > newSize) { - shrink(newSize); - } else if (newSize > capacity()) { - clear(); - reserveCapacity(newSize); - DCHECK(begin()); - } - - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, newSize); - std::fill(begin(), end(), val); - TypeOperations::uninitializedFill(end(), begin() + newSize, val); - m_size = newSize; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -void Vector<T, inlineCapacity, Allocator>::expandCapacity( - size_t newMinCapacity) { - size_t oldCapacity = capacity(); - size_t expandedCapacity = oldCapacity; - // We use a more aggressive expansion strategy for Vectors with inline - // storage. This is because they are more likely to be on the stack, so the - // risk of heap bloat is minimized. Furthermore, exceeding the inline - // capacity limit is not supposed to happen in the common case and may - // indicate a pathological condition or microbenchmark. - if (INLINE_CAPACITY) { - expandedCapacity *= 2; - // Check for integer overflow, which could happen in the 32-bit build. - RELEASE_ASSERT(expandedCapacity > oldCapacity); - } else { - // This cannot integer overflow. - // On 64-bit, the "expanded" integer is 32-bit, and any encroachment - // above 2^32 will fail allocation in allocateBuffer(). On 32-bit, - // there's not enough address space to hold the old and new buffers. In - // addition, our underlying allocator is supposed to always fail on > - // (2^31 - 1) allocations. 
- expandedCapacity += (expandedCapacity / 4) + 1; - } - reserveCapacity(std::max( - newMinCapacity, - std::max(static_cast<size_t>(kInitialVectorSize), expandedCapacity))); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -T* Vector<T, inlineCapacity, Allocator>::expandCapacity(size_t newMinCapacity, - T* ptr) { - if (ptr < begin() || ptr >= end()) { - expandCapacity(newMinCapacity); - return ptr; - } - size_t index = ptr - begin(); - expandCapacity(newMinCapacity); - return begin() + index; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -inline U* Vector<T, inlineCapacity, Allocator>::expandCapacity( - size_t newMinCapacity, - U* ptr) { - expandCapacity(newMinCapacity); - return ptr; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Vector<T, inlineCapacity, Allocator>::resize(size_t size) { - if (size <= m_size) { - TypeOperations::destruct(begin() + size, end()); - clearUnusedSlots(begin() + size, end()); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, size); - } else { - if (size > capacity()) - expandCapacity(size); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, size); - TypeOperations::initialize(end(), begin() + size); - } - - m_size = size; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -void Vector<T, inlineCapacity, Allocator>::shrink(size_t size) { - DCHECK_LE(size, m_size); - TypeOperations::destruct(begin() + size, end()); - clearUnusedSlots(begin() + size, end()); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, size); - m_size = size; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -void Vector<T, inlineCapacity, Allocator>::grow(size_t size) { - DCHECK_GE(size, m_size); - if (size > capacity()) - expandCapacity(size); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, size); - TypeOperations::initialize(end(), begin() + size); - m_size = size; -} - -template <typename T, size_t 
inlineCapacity, typename Allocator> -void Vector<T, inlineCapacity, Allocator>::reserveCapacity(size_t newCapacity) { - if (UNLIKELY(newCapacity <= capacity())) - return; - T* oldBuffer = begin(); - if (!oldBuffer) { - Base::allocateBuffer(newCapacity); - return; - } -#ifdef ANNOTATE_CONTIGUOUS_CONTAINER - size_t oldCapacity = capacity(); -#endif - // The Allocator::isGarbageCollected check is not needed. The check is just - // a static hint for a compiler to indicate that Base::expandBuffer returns - // false if Allocator is a PartitionAllocator. - if (Allocator::isGarbageCollected && Base::expandBuffer(newCapacity)) { - ANNOTATE_CHANGE_CAPACITY(begin(), oldCapacity, m_size, capacity()); - return; - } - T* oldEnd = end(); - Base::allocateExpandedBuffer(newCapacity); - ANNOTATE_NEW_BUFFER(begin(), capacity(), m_size); - TypeOperations::move(oldBuffer, oldEnd, begin()); - clearUnusedSlots(oldBuffer, oldEnd); - ANNOTATE_DELETE_BUFFER(oldBuffer, oldCapacity, m_size); - Base::deallocateBuffer(oldBuffer); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Vector<T, inlineCapacity, Allocator>::reserveInitialCapacity( - size_t initialCapacity) { - DCHECK(!m_size); - DCHECK(capacity() == INLINE_CAPACITY); - if (initialCapacity > INLINE_CAPACITY) { - ANNOTATE_DELETE_BUFFER(begin(), capacity(), m_size); - Base::allocateBuffer(initialCapacity); - ANNOTATE_NEW_BUFFER(begin(), capacity(), m_size); - } -} - -template <typename T, size_t inlineCapacity, typename Allocator> -void Vector<T, inlineCapacity, Allocator>::shrinkCapacity(size_t newCapacity) { - if (newCapacity >= capacity()) - return; - - if (newCapacity < size()) - shrink(newCapacity); - - T* oldBuffer = begin(); -#ifdef ANNOTATE_CONTIGUOUS_CONTAINER - size_t oldCapacity = capacity(); -#endif - if (newCapacity > 0) { - if (Base::shrinkBuffer(newCapacity)) { - ANNOTATE_CHANGE_CAPACITY(begin(), oldCapacity, m_size, capacity()); - return; - } - - T* oldEnd = end(); - 
Base::allocateBuffer(newCapacity); - if (begin() != oldBuffer) { - ANNOTATE_NEW_BUFFER(begin(), capacity(), m_size); - TypeOperations::move(oldBuffer, oldEnd, begin()); - clearUnusedSlots(oldBuffer, oldEnd); - ANNOTATE_DELETE_BUFFER(oldBuffer, oldCapacity, m_size); - } - } else { - Base::resetBufferPointer(); -#ifdef ANNOTATE_CONTIGUOUS_CONTAINER - if (oldBuffer != begin()) { - ANNOTATE_NEW_BUFFER(begin(), capacity(), m_size); - ANNOTATE_DELETE_BUFFER(oldBuffer, oldCapacity, m_size); - } -#endif - } - - Base::deallocateBuffer(oldBuffer); -} - -// Templatizing these is better than just letting the conversion happen -// implicitly, because for instance it allows a PassRefPtr to be appended to a -// RefPtr vector without refcount thrash. - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -ALWAYS_INLINE void Vector<T, inlineCapacity, Allocator>::push_back(U&& val) { - DCHECK(Allocator::isAllocationAllowed()); - if (LIKELY(size() != capacity())) { - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); - new (NotNull, end()) T(std::forward<U>(val)); - ++m_size; - return; - } - - appendSlowCase(std::forward<U>(val)); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename... Args> -ALWAYS_INLINE T& Vector<T, inlineCapacity, Allocator>::emplace_back( - Args&&... 
args) { - DCHECK(Allocator::isAllocationAllowed()); - if (UNLIKELY(size() == capacity())) - expandCapacity(size() + 1); - - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); - T* t = new (NotNull, end()) T(std::forward<Args>(args)...); - ++m_size; - return *t; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -void Vector<T, inlineCapacity, Allocator>::append(const U* data, - size_t dataSize) { - DCHECK(Allocator::isAllocationAllowed()); - size_t newSize = m_size + dataSize; - if (newSize > capacity()) { - data = expandCapacity(newSize, data); - DCHECK(begin()); - } - RELEASE_ASSERT(newSize >= m_size); - T* dest = end(); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, newSize); - VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy( - data, &data[dataSize], dest); - m_size = newSize; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -NEVER_INLINE void Vector<T, inlineCapacity, Allocator>::appendSlowCase( - U&& val) { - DCHECK_EQ(size(), capacity()); - - typename std::remove_reference<U>::type* ptr = &val; - ptr = expandCapacity(size() + 1, ptr); - DCHECK(begin()); - - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); - new (NotNull, end()) T(std::forward<U>(*ptr)); - ++m_size; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U, size_t otherCapacity, typename OtherAllocator> -inline void Vector<T, inlineCapacity, Allocator>::appendVector( - const Vector<U, otherCapacity, OtherAllocator>& val) { - append(val.begin(), val.size()); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename Iterator> -void Vector<T, inlineCapacity, Allocator>::appendRange(Iterator begin, - Iterator end) { - for (Iterator it = begin; it != end; ++it) - push_back(*it); -} - -// This version of append saves a branch in the case where you know that the -// vector's capacity is large 
enough for the append to succeed. -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -ALWAYS_INLINE void Vector<T, inlineCapacity, Allocator>::uncheckedAppend( - U&& val) { -#ifdef ANNOTATE_CONTIGUOUS_CONTAINER - // Vectors in ASAN builds don't have inlineCapacity. - push_back(std::forward<U>(val)); -#else - DCHECK_LT(size(), capacity()); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); - new (NotNull, end()) T(std::forward<U>(val)); - ++m_size; -#endif -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -inline void Vector<T, inlineCapacity, Allocator>::insert(size_t position, - U&& val) { - DCHECK(Allocator::isAllocationAllowed()); - RELEASE_ASSERT(position <= size()); - typename std::remove_reference<U>::type* data = &val; - if (size() == capacity()) { - data = expandCapacity(size() + 1, data); - DCHECK(begin()); - } - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size + 1); - T* spot = begin() + position; - TypeOperations::moveOverlapping(spot, end(), spot + 1); - new (NotNull, spot) T(std::forward<U>(*data)); - ++m_size; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -void Vector<T, inlineCapacity, Allocator>::insert(size_t position, - const U* data, - size_t dataSize) { - DCHECK(Allocator::isAllocationAllowed()); - RELEASE_ASSERT(position <= size()); - size_t newSize = m_size + dataSize; - if (newSize > capacity()) { - data = expandCapacity(newSize, data); - DCHECK(begin()); - } - RELEASE_ASSERT(newSize >= m_size); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, newSize); - T* spot = begin() + position; - TypeOperations::moveOverlapping(spot, end(), spot + dataSize); - VectorCopier<VectorTraits<T>::canCopyWithMemcpy, T>::uninitializedCopy( - data, &data[dataSize], spot); - m_size = newSize; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U, size_t otherCapacity, 
typename OtherAllocator> -inline void Vector<T, inlineCapacity, Allocator>::insert( - size_t position, - const Vector<U, otherCapacity, OtherAllocator>& val) { - insert(position, val.begin(), val.size()); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -inline void Vector<T, inlineCapacity, Allocator>::push_front(U&& val) { - insert(0, std::forward<U>(val)); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U> -void Vector<T, inlineCapacity, Allocator>::push_front(const U* data, - size_t dataSize) { - insert(0, data, dataSize); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename U, size_t otherCapacity, typename OtherAllocator> -inline void Vector<T, inlineCapacity, Allocator>::prependVector( - const Vector<U, otherCapacity, OtherAllocator>& val) { - insert(0, val.begin(), val.size()); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Vector<T, inlineCapacity, Allocator>::remove(size_t position) { - RELEASE_ASSERT(position < size()); - T* spot = begin() + position; - spot->~T(); - TypeOperations::moveOverlapping(spot + 1, end(), spot); - clearUnusedSlots(end() - 1, end()); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size - 1); - --m_size; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void Vector<T, inlineCapacity, Allocator>::remove(size_t position, - size_t length) { - SECURITY_DCHECK(position <= size()); - if (!length) - return; - RELEASE_ASSERT(position + length <= size()); - T* beginSpot = begin() + position; - T* endSpot = beginSpot + length; - TypeOperations::destruct(beginSpot, endSpot); - TypeOperations::moveOverlapping(endSpot, end(), beginSpot); - clearUnusedSlots(end() - length, end()); - ANNOTATE_CHANGE_SIZE(begin(), capacity(), m_size, m_size - length); - m_size -= length; -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline 
void Vector<T, inlineCapacity, Allocator>::reverse() { - for (size_t i = 0; i < m_size / 2; ++i) - std::swap(at(i), at(m_size - 1 - i)); -} - -template <typename T, size_t inlineCapacity, typename Allocator> -inline void swap(Vector<T, inlineCapacity, Allocator>& a, - Vector<T, inlineCapacity, Allocator>& b) { - a.swap(b); -} - -template <typename T, - size_t inlineCapacityA, - size_t inlineCapacityB, - typename Allocator> -bool operator==(const Vector<T, inlineCapacityA, Allocator>& a, - const Vector<T, inlineCapacityB, Allocator>& b) { - if (a.size() != b.size()) - return false; - if (a.isEmpty()) - return true; - return VectorTypeOperations<T>::compare(a.data(), b.data(), a.size()); -} - -template <typename T, - size_t inlineCapacityA, - size_t inlineCapacityB, - typename Allocator> -inline bool operator!=(const Vector<T, inlineCapacityA, Allocator>& a, - const Vector<T, inlineCapacityB, Allocator>& b) { - return !(a == b); -} - -// This is only called if the allocator is a HeapAllocator. It is used when -// visiting during a tracing GC. -template <typename T, size_t inlineCapacity, typename Allocator> -template <typename VisitorDispatcher> -void Vector<T, inlineCapacity, Allocator>::trace(VisitorDispatcher visitor) { - DCHECK(Allocator::isGarbageCollected) << "Garbage collector must be enabled."; - if (!buffer()) - return; - if (this->hasOutOfLineBuffer()) { - // This is a performance optimization for a case where the buffer has - // been already traced by somewhere. This can happen if the conservative - // scanning traced an on-stack (false-positive or real) pointer to the - // HeapVector, and then visitor->trace() traces the HeapVector. 
- if (Allocator::isHeapObjectAlive(buffer())) - return; - Allocator::markNoTracing(visitor, buffer()); - Allocator::registerBackingStoreReference(visitor, Base::bufferSlot()); - } - const T* bufferBegin = buffer(); - const T* bufferEnd = buffer() + size(); - if (IsTraceableInCollectionTrait<VectorTraits<T>>::value) { - for (const T* bufferEntry = bufferBegin; bufferEntry != bufferEnd; - bufferEntry++) - Allocator::template trace<VisitorDispatcher, T, VectorTraits<T>>( - visitor, *const_cast<T*>(bufferEntry)); - checkUnusedSlots(buffer() + size(), buffer() + capacity()); - } -} - -} // namespace WTF - -using WTF::Vector; - -#endif // WTF_Vector_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Source/wtf/VectorTraits.h b/third_party/WebKit/Source/wtf/VectorTraits.h index b721f462..eadd393 100644 --- a/third_party/WebKit/Source/wtf/VectorTraits.h +++ b/third_party/WebKit/Source/wtf/VectorTraits.h
@@ -1,186 +1,9 @@ -/* - * Copyright (C) 2006, 2007, 2008 Apple Inc. All rights reserved. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Library General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Library General Public License for more details. - * - * You should have received a copy of the GNU Library General Public License - * along with this library; see the file COPYING.LIB. If not, write to - * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, - * Boston, MA 02110-1301, USA. - * - */ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#ifndef WTF_VectorTraits_h -#define WTF_VectorTraits_h +#include "platform/wtf/VectorTraits.h" -#include "wtf/RefPtr.h" -#include "wtf/TypeTraits.h" -#include <memory> -#include <type_traits> -#include <utility> - -namespace WTF { - -template <typename T> -struct VectorTraitsBase { - static const bool needsDestruction = !IsTriviallyDestructible<T>::value; - - static const bool canInitializeWithMemset = - IsTriviallyDefaultConstructible<T>::value; - // true iff memset(slot, 0, size) constructs an unused slot value that is - // valid for Oilpan to trace and if the value needs destruction, its - // destructor can be invoked over. The zero'ed value representing an unused - // slot in the vector's backing storage; it does not have to be equal to - // what its constructor(s) would create, only be valid for those two uses. 
- static const bool canClearUnusedSlotsWithMemset = - IsTriviallyDefaultConstructible<T>::value; - - static const bool canMoveWithMemcpy = IsTriviallyMoveAssignable<T>::value; - static const bool canCopyWithMemcpy = IsTriviallyCopyAssignable<T>::value; - static const bool canFillWithMemset = - IsTriviallyDefaultConstructible<T>::value && (sizeof(T) == sizeof(char)); - static const bool canCompareWithMemcmp = - std::is_scalar<T>::value; // Types without padding. - - // Supports swapping elements using regular std::swap semantics. - static const bool canSwapUsingCopyOrMove = true; - - template <typename U = void> - struct IsTraceableInCollection { - static const bool value = IsTraceable<T>::value; - }; - // We don't support weak handling in vectors. - static const WeakHandlingFlag weakHandlingFlag = NoWeakHandlingInCollections; -}; - -template <typename T> -struct VectorTraits : VectorTraitsBase<T> {}; - -// Classes marked with SimpleVectorTraits will use memmov, memcpy, memcmp -// instead of constructors, copy operators, etc for initialization, move and -// comparison. -template <typename T> -struct SimpleClassVectorTraits : VectorTraitsBase<T> { - static const bool canInitializeWithMemset = true; - static const bool canClearUnusedSlotsWithMemset = true; - static const bool canMoveWithMemcpy = true; - static const bool canCompareWithMemcmp = true; -}; - -// We know std::unique_ptr and RefPtr are simple enough that initializing to 0 -// and moving with memcpy (and then not destructing the original) will totally -// work. -template <typename P> -struct VectorTraits<RefPtr<P>> : SimpleClassVectorTraits<RefPtr<P>> {}; - -template <typename P> -struct VectorTraits<std::unique_ptr<P>> - : SimpleClassVectorTraits<std::unique_ptr<P>> { - // std::unique_ptr -> std::unique_ptr has a very particular structure that - // tricks the normal type traits into thinking that the class is "trivially - // copyable". 
- static const bool canCopyWithMemcpy = false; -}; -static_assert(VectorTraits<RefPtr<int>>::canInitializeWithMemset, - "inefficient RefPtr Vector"); -static_assert(VectorTraits<RefPtr<int>>::canMoveWithMemcpy, - "inefficient RefPtr Vector"); -static_assert(VectorTraits<RefPtr<int>>::canCompareWithMemcmp, - "inefficient RefPtr Vector"); -static_assert(VectorTraits<std::unique_ptr<int>>::canInitializeWithMemset, - "inefficient std::unique_ptr Vector"); -static_assert(VectorTraits<std::unique_ptr<int>>::canMoveWithMemcpy, - "inefficient std::unique_ptr Vector"); -static_assert(VectorTraits<std::unique_ptr<int>>::canCompareWithMemcmp, - "inefficient std::unique_ptr Vector"); - -template <typename First, typename Second> -struct VectorTraits<std::pair<First, Second>> { - typedef VectorTraits<First> FirstTraits; - typedef VectorTraits<Second> SecondTraits; - - static const bool needsDestruction = - FirstTraits::needsDestruction || SecondTraits::needsDestruction; - static const bool canInitializeWithMemset = - FirstTraits::canInitializeWithMemset && - SecondTraits::canInitializeWithMemset; - static const bool canMoveWithMemcpy = - FirstTraits::canMoveWithMemcpy && SecondTraits::canMoveWithMemcpy; - static const bool canCopyWithMemcpy = - FirstTraits::canCopyWithMemcpy && SecondTraits::canCopyWithMemcpy; - static const bool canFillWithMemset = false; - static const bool canCompareWithMemcmp = - FirstTraits::canCompareWithMemcmp && SecondTraits::canCompareWithMemcmp; - static const bool canClearUnusedSlotsWithMemset = - FirstTraits::canClearUnusedSlotsWithMemset && - SecondTraits::canClearUnusedSlotsWithMemset; - // Supports swapping elements using regular std::swap semantics. 
- static const bool canSwapUsingCopyOrMove = true; - template <typename U = void> - struct IsTraceableInCollection { - static const bool value = - IsTraceableInCollectionTrait<FirstTraits>::value || - IsTraceableInCollectionTrait<SecondTraits>::value; - }; - // We don't support weak handling in vectors. - static const WeakHandlingFlag weakHandlingFlag = NoWeakHandlingInCollections; -}; - -} // namespace WTF - -#define WTF_ALLOW_MOVE_INIT_AND_COMPARE_WITH_MEM_FUNCTIONS(ClassName) \ - namespace WTF { \ - static_assert(!IsTriviallyDefaultConstructible<ClassName>::value || \ - !IsTriviallyMoveAssignable<ClassName>::value || \ - !std::is_scalar<ClassName>::value, \ - "macro not needed"); \ - template <> \ - struct VectorTraits<ClassName> : SimpleClassVectorTraits<ClassName> {}; \ - } - -#define WTF_ALLOW_MOVE_AND_INIT_WITH_MEM_FUNCTIONS(ClassName) \ - namespace WTF { \ - static_assert(!IsTriviallyDefaultConstructible<ClassName>::value || \ - !IsTriviallyMoveAssignable<ClassName>::value, \ - "macro not needed"); \ - template <> \ - struct VectorTraits<ClassName> : VectorTraitsBase<ClassName> { \ - static const bool canInitializeWithMemset = true; \ - static const bool canClearUnusedSlotsWithMemset = true; \ - static const bool canMoveWithMemcpy = true; \ - }; \ - } - -#define WTF_ALLOW_INIT_WITH_MEM_FUNCTIONS(ClassName) \ - namespace WTF { \ - static_assert(!IsTriviallyDefaultConstructible<ClassName>::value, \ - "macro not needed"); \ - template <> \ - struct VectorTraits<ClassName> : VectorTraitsBase<ClassName> { \ - static const bool canInitializeWithMemset = true; \ - static const bool canClearUnusedSlotsWithMemset = true; \ - }; \ - } - -#define WTF_ALLOW_CLEAR_UNUSED_SLOTS_WITH_MEM_FUNCTIONS(ClassName) \ - namespace WTF { \ - static_assert(!IsTriviallyDefaultConstructible<ClassName>::value, \ - "macro not needed"); \ - template <> \ - struct VectorTraits<ClassName> : VectorTraitsBase<ClassName> { \ - static const bool canClearUnusedSlotsWithMemset = true; \ - }; \ - 
} - -using WTF::VectorTraits; -using WTF::SimpleClassVectorTraits; - -#endif // WTF_VectorTraits_h +// The contents of this header was moved to platform/wtf as part of +// WTF migration project. See the following post for details: +// https://groups.google.com/a/chromium.org/d/msg/blink-dev/tLdAZCTlcAA/bYXVT8gYCAAJ
diff --git a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/android.py b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/android.py index 8e59b06b..244dfede 100644 --- a/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/android.py +++ b/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/port/android.py
@@ -814,7 +814,7 @@ try: tombstones = self._device.RunShellCommand( 'ls -n /data/tombstones/tombstone_*', - check_return=True) + check_return=True, shell=True) except device_errors.CommandFailedError as exc: # FIXME: crbug.com/321489 ... figure out why we sometimes get # permission denied.
diff --git a/third_party/protobuf/BUILD.gn b/third_party/protobuf/BUILD.gn index 829cfdfe..067416e 100644 --- a/third_party/protobuf/BUILD.gn +++ b/third_party/protobuf/BUILD.gn
@@ -36,6 +36,11 @@ ] } +protobuf_globals_sources = [ + "src/google/protobuf/globals.cc", + "src/google/protobuf/stubs/atomicops_internals_x86_gcc.cc", +] + protobuf_lite_sources = [ "src/google/protobuf/arena.cc", "src/google/protobuf/arena.h", @@ -73,7 +78,6 @@ "src/google/protobuf/stubs/atomicops_internals_ppc_gcc.h", "src/google/protobuf/stubs/atomicops_internals_solaris.h", "src/google/protobuf/stubs/atomicops_internals_tsan.h", - "src/google/protobuf/stubs/atomicops_internals_x86_gcc.cc", "src/google/protobuf/stubs/atomicops_internals_x86_gcc.h", "src/google/protobuf/stubs/atomicops_internals_x86_msvc.cc", "src/google/protobuf/stubs/atomicops_internals_x86_msvc.h", @@ -160,10 +164,7 @@ ":protobuf_use_dlls", ] defines = [ "LIBPROTOBUF_EXPORTS" ] - sources = [ - "src/google/protobuf/globals.cc", - "src/google/protobuf/stubs/atomicops_internals_x86_gcc.cc", - ] + sources = protobuf_globals_sources } } @@ -199,7 +200,7 @@ if (is_component_build && is_linux && !is_chromeos) { deps += [ ":protobuf_globals" ] } else { - sources += [ "src/google/protobuf/globals.cc" ] + sources += protobuf_globals_sources } # Required for component builds. See http://crbug.com/172800. @@ -223,8 +224,7 @@ "//third_party/libprotobuf-mutator:*", ] - sources = protobuf_lite_sources - sources += [ + sources = protobuf_lite_sources + protobuf_globals_sources + [ "src/google/protobuf/any.cc", "src/google/protobuf/any.h", "src/google/protobuf/any.pb.cc", @@ -254,7 +254,6 @@ "src/google/protobuf/generated_enum_util.h", "src/google/protobuf/generated_message_reflection.cc", "src/google/protobuf/generated_message_reflection.h", - "src/google/protobuf/globals.cc", # gzip_stream.cc pulls in zlib, but it's not actually used by protoc, just # by test code, so instead of compiling zlib for the host, let's just
diff --git a/tools/metrics/histograms/histograms.xml b/tools/metrics/histograms/histograms.xml index 69a00de..9f40d83 100644 --- a/tools/metrics/histograms/histograms.xml +++ b/tools/metrics/histograms/histograms.xml
@@ -41207,6 +41207,14 @@ </summary> </histogram> +<histogram name="NewTabPage.LogoImageDownloaded" enum="BooleanFromHTTPCache"> + <owner>treib@chromium.org</owner> + <summary> + A logo image (static or CTA) was downloaded. Recorded only when the image + was downloaded and decoded without errors. + </summary> +</histogram> + <histogram name="NewTabPage.LogoShown" enum="NewTabPageLogoShown"> <owner>ianwen@chromium.org</owner> <summary> @@ -83143,6 +83151,11 @@ <int value="1" label="From Address Book"/> </enum> +<enum name="BooleanFromHTTPCache" type="int"> + <int value="0" label="Downloaded from network"/> + <int value="1" label="HTTP cache hit"/> +</enum> + <enum name="BooleanGAIAWebViewFlow" type="int"> <int value="0" label="iframe-based flow"/> <int value="1" label="WebView-based flow"/>
diff --git a/ui/aura/mus/mus_context_factory.cc b/ui/aura/mus/mus_context_factory.cc index 6a099290..be536b8 100644 --- a/ui/aura/mus/mus_context_factory.cc +++ b/ui/aura/mus/mus_context_factory.cc
@@ -26,9 +26,18 @@ WindowTreeHost::GetForAcceleratedWidget(compositor->widget()); WindowPortMus* window_port = WindowPortMus::Get(host->window()); DCHECK(window_port); - auto compositor_frame_sink = window_port->RequestCompositorFrameSink( + window_port->RequestCompositorFrameSink( gpu_->CreateContextProvider(std::move(gpu_channel)), - gpu_->gpu_memory_buffer_manager()); + gpu_->gpu_memory_buffer_manager(), + base::Bind(&MusContextFactory::OnCompositorFrameSinkAvailable, + weak_ptr_factory_.GetWeakPtr(), compositor)); +} + +void MusContextFactory::OnCompositorFrameSinkAvailable( + base::WeakPtr<ui::Compositor> compositor, + std::unique_ptr<cc::CompositorFrameSink> compositor_frame_sink) { + if (!compositor) + return; compositor->SetCompositorFrameSink(std::move(compositor_frame_sink)); }
diff --git a/ui/aura/mus/mus_context_factory.h b/ui/aura/mus/mus_context_factory.h index babca479..0af72773 100644 --- a/ui/aura/mus/mus_context_factory.h +++ b/ui/aura/mus/mus_context_factory.h
@@ -37,6 +37,10 @@ void OnEstablishedGpuChannel(base::WeakPtr<ui::Compositor> compositor, scoped_refptr<gpu::GpuChannelHost> gpu_channel); + void OnCompositorFrameSinkAvailable( + base::WeakPtr<ui::Compositor> compositor, + std::unique_ptr<cc::CompositorFrameSink> compositor_frame_sink); + // ContextFactory: void CreateCompositorFrameSink( base::WeakPtr<ui::Compositor> compositor) override;
diff --git a/ui/aura/mus/window_mus.h b/ui/aura/mus/window_mus.h index f88d042..873ac31c 100644 --- a/ui/aura/mus/window_mus.h +++ b/ui/aura/mus/window_mus.h
@@ -82,6 +82,8 @@ virtual void SetPredefinedCursorFromServer(ui::mojom::Cursor cursor) = 0; virtual void SetPropertyFromServer(const std::string& property_name, const std::vector<uint8_t>* data) = 0; + virtual void SetFrameSinkIdFromServer( + const cc::FrameSinkId& frame_sink_id) = 0; virtual void SetSurfaceInfoFromServer( const cc::SurfaceInfo& surface_info) = 0; // The window was deleted on the server side. DestroyFromServer() should
diff --git a/ui/aura/mus/window_port_mus.cc b/ui/aura/mus/window_port_mus.cc index fc09504..2d50370 100644 --- a/ui/aura/mus/window_port_mus.cc +++ b/ui/aura/mus/window_port_mus.cc
@@ -82,18 +82,39 @@ window_tree_client_->Embed(window_, std::move(client), flags, callback); } -std::unique_ptr<ui::ClientCompositorFrameSink> -WindowPortMus::RequestCompositorFrameSink( +void WindowPortMus::RequestCompositorFrameSink( scoped_refptr<cc::ContextProvider> context_provider, - gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager) { + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager, + const CompositorFrameSinkCallback& callback) { + DCHECK(pending_compositor_frame_sink_request_.is_null()); + // If we haven't received a FrameSinkId from the window server yet then we + // bind the parameters to a closure that will be called once the FrameSinkId + // is available. + if (!frame_sink_id_.is_valid()) { + pending_compositor_frame_sink_request_ = + base::Bind(&WindowPortMus::RequestCompositorFrameSinkInternal, + base::Unretained(this), std::move(context_provider), + gpu_memory_buffer_manager, callback); + return; + } + + RequestCompositorFrameSinkInternal(std::move(context_provider), + gpu_memory_buffer_manager, callback); +} + +void WindowPortMus::RequestCompositorFrameSinkInternal( + scoped_refptr<cc::ContextProvider> context_provider, + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager, + const CompositorFrameSinkCallback& callback) { + DCHECK(frame_sink_id_.is_valid()); std::unique_ptr<ui::ClientCompositorFrameSinkBinding> compositor_frame_sink_binding; std::unique_ptr<ui::ClientCompositorFrameSink> compositor_frame_sink = ui::ClientCompositorFrameSink::Create( - cc::FrameSinkId(server_id(), 0), std::move(context_provider), + frame_sink_id_, std::move(context_provider), gpu_memory_buffer_manager, &compositor_frame_sink_binding); AttachCompositorFrameSink(std::move(compositor_frame_sink_binding)); - return compositor_frame_sink; + callback.Run(std::move(compositor_frame_sink)); } void WindowPortMus::AttachCompositorFrameSink( @@ -247,6 +268,13 @@ property_data); } +void WindowPortMus::SetFrameSinkIdFromServer( + const cc::FrameSinkId& 
frame_sink_id) { + frame_sink_id_ = frame_sink_id; + if (!pending_compositor_frame_sink_request_.is_null()) + base::ResetAndReturn(&pending_compositor_frame_sink_request_).Run(); +} + void WindowPortMus::SetSurfaceInfoFromServer( const cc::SurfaceInfo& surface_info) { if (surface_info_.is_valid()) {
diff --git a/ui/aura/mus/window_port_mus.h b/ui/aura/mus/window_port_mus.h index c1f6e35..f70829643 100644 --- a/ui/aura/mus/window_port_mus.h +++ b/ui/aura/mus/window_port_mus.h
@@ -68,9 +68,17 @@ uint32_t flags, const ui::mojom::WindowTree::EmbedCallback& callback); - std::unique_ptr<ui::ClientCompositorFrameSink> RequestCompositorFrameSink( + using CompositorFrameSinkCallback = + base::Callback<void(std::unique_ptr<cc::CompositorFrameSink>)>; + void RequestCompositorFrameSink( scoped_refptr<cc::ContextProvider> context_provider, - gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager); + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager, + const CompositorFrameSinkCallback& callback); + + void RequestCompositorFrameSinkInternal( + scoped_refptr<cc::ContextProvider> context_provider, + gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager, + const CompositorFrameSinkCallback& callback); void AttachCompositorFrameSink( std::unique_ptr<ui::ClientCompositorFrameSinkBinding> @@ -210,6 +218,7 @@ void SetPropertyFromServer( const std::string& property_name, const std::vector<uint8_t>* property_data) override; + void SetFrameSinkIdFromServer(const cc::FrameSinkId& frame_sink_id) override; void SetSurfaceInfoFromServer(const cc::SurfaceInfo& surface_info) override; void DestroyFromServer() override; void AddTransientChildFromServer(WindowMus* child) override; @@ -250,6 +259,9 @@ ServerChangeIdType next_server_change_id_ = 0; ServerChanges server_changes_; + cc::FrameSinkId frame_sink_id_; + base::Closure pending_compositor_frame_sink_request_; + cc::SurfaceInfo surface_info_; ui::mojom::Cursor predefined_cursor_ = ui::mojom::Cursor::CURSOR_NULL;
diff --git a/ui/aura/mus/window_tree_client.cc b/ui/aura/mus/window_tree_client.cc index 6cd345db..ad0395c 100644 --- a/ui/aura/mus/window_tree_client.cc +++ b/ui/aura/mus/window_tree_client.cc
@@ -439,13 +439,14 @@ std::unique_ptr<WindowTreeHostMus> WindowTreeClient::CreateWindowTreeHost( WindowMusType window_mus_type, const ui::mojom::WindowData& window_data, - int64_t display_id) { + int64_t display_id, + const cc::FrameSinkId& frame_sink_id) { std::unique_ptr<WindowPortMus> window_port = CreateWindowPortMus(window_data, window_mus_type); roots_.insert(window_port.get()); std::unique_ptr<WindowTreeHostMus> window_tree_host = base::MakeUnique<WindowTreeHostMus>(std::move(window_port), this, - display_id); + display_id, frame_sink_id); window_tree_host->InitHost(); SetLocalPropertiesFromServerProperties( WindowMus::Get(window_tree_host->window()), window_data); @@ -528,15 +529,16 @@ ui::mojom::WindowDataPtr root_data, int64_t display_id, Id focused_window_id, - bool drawn) { + bool drawn, + const cc::FrameSinkId& frame_sink_id) { // WARNING: this is only called if WindowTreeClient was created as the // result of an embedding. client_id_ = client_id; WindowTreeConnectionEstablished(window_tree); DCHECK(roots_.empty()); - std::unique_ptr<WindowTreeHostMus> window_tree_host = - CreateWindowTreeHost(WindowMusType::EMBED, *root_data, display_id); + std::unique_ptr<WindowTreeHostMus> window_tree_host = CreateWindowTreeHost( + WindowMusType::EMBED, *root_data, display_id, frame_sink_id); focus_synchronizer_->SetFocusFromServer( GetWindowByServerId(focused_window_id)); @@ -547,13 +549,14 @@ WindowTreeHostMus* WindowTreeClient::WmNewDisplayAddedImpl( const display::Display& display, ui::mojom::WindowDataPtr root_data, - bool parent_drawn) { + bool parent_drawn, + const cc::FrameSinkId& frame_sink_id) { DCHECK(window_manager_delegate_); window_manager_delegate_->OnWmWillCreateDisplay(display); - std::unique_ptr<WindowTreeHostMus> window_tree_host = - CreateWindowTreeHost(WindowMusType::DISPLAY, *root_data, display.id()); + std::unique_ptr<WindowTreeHostMus> window_tree_host = CreateWindowTreeHost( + WindowMusType::DISPLAY, *root_data, display.id(), frame_sink_id); 
WindowTreeHostMus* window_tree_host_ptr = window_tree_host.get(); window_manager_delegate_->OnWmNewDisplay(std::move(window_tree_host), @@ -871,7 +874,8 @@ ui::mojom::WindowTreePtr tree, int64_t display_id, Id focused_window_id, - bool drawn) { + bool drawn, + const cc::FrameSinkId& frame_sink_id) { DCHECK(!tree_ptr_); tree_ptr_ = std::move(tree); @@ -883,7 +887,7 @@ } OnEmbedImpl(tree_ptr_.get(), client_id, std::move(root_data), display_id, - focused_window_id, drawn); + focused_window_id, drawn, frame_sink_id); } void WindowTreeClient::OnEmbeddedAppDisconnected(Id window_id) { @@ -919,7 +923,8 @@ void WindowTreeClient::OnTopLevelCreated(uint32_t change_id, ui::mojom::WindowDataPtr data, int64_t display_id, - bool drawn) { + bool drawn, + const cc::FrameSinkId& frame_sink_id) { // The server ack'd the top level window we created and supplied the state // of the window at the time the server created it. For properties we do not // have changes in flight for we can update them immediately. For properties @@ -983,6 +988,8 @@ // Top level windows should not have a parent. DCHECK_EQ(0u, data->parent_id); + + window->SetFrameSinkIdFromServer(frame_sink_id); } void WindowTreeClient::OnWindowBoundsChanged( @@ -1351,8 +1358,10 @@ void WindowTreeClient::WmNewDisplayAdded(const display::Display& display, ui::mojom::WindowDataPtr root_data, - bool parent_drawn) { - WmNewDisplayAddedImpl(display, std::move(root_data), parent_drawn); + bool parent_drawn, + const cc::FrameSinkId& frame_sink_id) { + WmNewDisplayAddedImpl(display, std::move(root_data), parent_drawn, + frame_sink_id); } void WindowTreeClient::WmDisplayRemoved(int64_t display_id) {
diff --git a/ui/aura/mus/window_tree_client.h b/ui/aura/mus/window_tree_client.h index 7ca78f4..8c9c8098 100644 --- a/ui/aura/mus/window_tree_client.h +++ b/ui/aura/mus/window_tree_client.h
@@ -231,7 +231,8 @@ std::unique_ptr<WindowTreeHostMus> CreateWindowTreeHost( WindowMusType window_mus_type, const ui::mojom::WindowData& window_data, - int64_t display_id); + int64_t display_id, + const cc::FrameSinkId& frame_sink_id = cc::FrameSinkId()); WindowMus* NewWindowFromWindowData(WindowMus* parent, const ui::mojom::WindowData& window_data); @@ -257,12 +258,15 @@ ui::mojom::WindowDataPtr root_data, int64_t display_id, Id focused_window_id, - bool drawn); + bool drawn, + const cc::FrameSinkId& frame_sink_id); // Called by WmNewDisplayAdded(). - WindowTreeHostMus* WmNewDisplayAddedImpl(const display::Display& display, - ui::mojom::WindowDataPtr root_data, - bool parent_drawn); + WindowTreeHostMus* WmNewDisplayAddedImpl( + const display::Display& display, + ui::mojom::WindowDataPtr root_data, + bool parent_drawn, + const cc::FrameSinkId& frame_sink_id); std::unique_ptr<EventResultCallback> CreateEventResultCallback( int32_t event_id); @@ -309,7 +313,8 @@ ui::mojom::WindowTreePtr tree, int64_t display_id, Id focused_window_id, - bool drawn) override; + bool drawn, + const cc::FrameSinkId& frame_sink_Id) override; void OnEmbeddedAppDisconnected(Id window_id) override; void OnUnembed(Id window_id) override; void OnCaptureChanged(Id new_capture_window_id, @@ -317,7 +322,8 @@ void OnTopLevelCreated(uint32_t change_id, ui::mojom::WindowDataPtr data, int64_t display_id, - bool drawn) override; + bool drawn, + const cc::FrameSinkId& frame_sink_id) override; void OnWindowBoundsChanged( Id window_id, const gfx::Rect& old_bounds, @@ -394,7 +400,8 @@ void OnConnect(ClientSpecificId client_id) override; void WmNewDisplayAdded(const display::Display& display, ui::mojom::WindowDataPtr root_data, - bool parent_drawn) override; + bool parent_drawn, + const cc::FrameSinkId& frame_sink_id) override; void WmDisplayRemoved(int64_t display_id) override; void WmDisplayModified(const display::Display& display) override; void WmSetBounds(uint32_t change_id,
diff --git a/ui/aura/mus/window_tree_client_unittest.cc b/ui/aura/mus/window_tree_client_unittest.cc index 48f7040..b31de25 100644 --- a/ui/aura/mus/window_tree_client_unittest.cc +++ b/ui/aura/mus/window_tree_client_unittest.cc
@@ -574,7 +574,8 @@ TEST_F(WindowTreeClientClientTest, InputEventBasic) { InputEventBasicTestWindowDelegate window_delegate(window_tree()); - WindowTreeHostMus window_tree_host(window_tree_client_impl()); + WindowTreeHostMus window_tree_host(window_tree_client_impl(), + cc::FrameSinkId(1, 1)); Window* top_level = window_tree_host.window(); const gfx::Rect bounds(0, 0, 100, 100); window_tree_host.SetBoundsInPixels(bounds); @@ -607,7 +608,8 @@ } TEST_F(WindowTreeClientClientTest, InputEventFindTargetAndConversion) { - WindowTreeHostMus window_tree_host(window_tree_client_impl()); + WindowTreeHostMus window_tree_host(window_tree_client_impl(), + cc::FrameSinkId(1, 1)); Window* top_level = window_tree_host.window(); const gfx::Rect bounds(0, 0, 100, 100); window_tree_host.SetBoundsInPixels(bounds); @@ -1081,8 +1083,8 @@ ui::mojom::WindowDataPtr data = ui::mojom::WindowData::New(); data->window_id = server_id(top_level); const int64_t display_id = 1; - window_tree_client()->OnTopLevelCreated(change_id, std::move(data), - display_id, false); + window_tree_client()->OnTopLevelCreated( + change_id, std::move(data), display_id, false, cc::FrameSinkId(1, 1)); EXPECT_FALSE(window_tree_host->window()->TargetVisibility()); @@ -1121,8 +1123,8 @@ uint32_t change_id; ASSERT_TRUE(window_tree()->GetAndRemoveFirstChangeOfType( WindowTreeChangeType::NEW_TOP_LEVEL, &change_id)); - window_tree_client()->OnTopLevelCreated(change_id, std::move(data), - display_id, true); + window_tree_client()->OnTopLevelCreated( + change_id, std::move(data), display_id, true, cc::FrameSinkId(1, 1)); EXPECT_EQ( 0u, window_tree()->GetChangeCountForType(WindowTreeChangeType::VISIBLE)); @@ -1173,7 +1175,8 @@ ASSERT_TRUE(window_tree()->GetAndRemoveFirstChangeOfType( WindowTreeChangeType::NEW_TOP_LEVEL, &new_window_in_flight_change_id)); window_tree_client()->OnTopLevelCreated(new_window_in_flight_change_id, - std::move(data), display_id, true); + std::move(data), display_id, true, + cc::FrameSinkId(1, 1)); // 
The only value that should take effect is the property for 'yy' as it was // not in flight. @@ -1395,8 +1398,8 @@ WindowTreeChangeType::NEW_TOP_LEVEL, &change_id)); const int64_t display_id = 1; - window_tree_client()->OnTopLevelCreated(change_id, std::move(data), - display_id, true); + window_tree_client()->OnTopLevelCreated( + change_id, std::move(data), display_id, true, cc::FrameSinkId(1, 1)); EXPECT_EQ(initial_root_count, window_tree_client_impl()->GetRoots().size()); } @@ -1413,7 +1416,7 @@ mojo::ConvertTo<std::vector<uint8_t>>(kUnknownPropertyValue); std::unique_ptr<WindowTreeHostMus> window_tree_host = base::MakeUnique<WindowTreeHostMus>(window_tree_client_impl(), - &properties); + cc::FrameSinkId(1, 1), &properties); window_tree_host->InitHost(); window_tree_host->window()->Show(); // Verify the property made it to the window. @@ -1700,8 +1703,8 @@ data->window_id = server_id(top_level); data->visible = true; const int64_t display_id = 1; - window_tree_client()->OnTopLevelCreated(change_id, std::move(data), - display_id, true); + window_tree_client()->OnTopLevelCreated( + change_id, std::move(data), display_id, true, cc::FrameSinkId(1, 1)); EXPECT_EQ( 0u, window_tree()->GetChangeCountForType(WindowTreeChangeType::VISIBLE)); EXPECT_TRUE(top_level->TargetVisibility()); @@ -1909,8 +1912,8 @@ uint32_t change_id; ASSERT_TRUE(window_tree()->GetAndRemoveFirstChangeOfType( WindowTreeChangeType::NEW_TOP_LEVEL, &change_id)); - window_tree_client()->OnTopLevelCreated(change_id, std::move(data), - display_id, true); + window_tree_client()->OnTopLevelCreated( + change_id, std::move(data), display_id, true, cc::FrameSinkId(1, 1)); // aura::Window should operate in DIP and aura::WindowTreeHost should operate // in pixels.
diff --git a/ui/aura/mus/window_tree_host_mus.cc b/ui/aura/mus/window_tree_host_mus.cc index fb04b99a..880b541 100644 --- a/ui/aura/mus/window_tree_host_mus.cc +++ b/ui/aura/mus/window_tree_host_mus.cc
@@ -42,6 +42,7 @@ std::unique_ptr<WindowPortMus> window_port, WindowTreeClient* window_tree_client, int64_t display_id, + const cc::FrameSinkId& frame_sink_id, const std::map<std::string, std::vector<uint8_t>>* properties) : WindowTreeHostPlatform(std::move(window_port)), display_id_(display_id), @@ -49,17 +50,16 @@ window()->SetProperty(kWindowTreeHostMusKey, this); // TODO(sky): find a cleaner way to set this! Better solution is to likely // have constructor take aura::Window. - WindowPortMus::Get(window())->window_ = window(); + WindowPortMus* window_mus = WindowPortMus::Get(window()); + window_mus->window_ = window(); if (properties) { // Apply the properties before initializing the window, that way the // server seems them at the time the window is created. - WindowMus* window_mus = WindowMus::Get(window()); for (auto& pair : *properties) window_mus->SetPropertyFromServer(pair.first, &pair.second); } - Id server_id = WindowMus::Get(window())->server_id(); - cc::FrameSinkId frame_sink_id(server_id, 0); - DCHECK(frame_sink_id.is_valid()); + // TODO(fsamuel): Once the display compositor is decoupled from the browser + // process then ui::Compositor will not need a cc::FrameSinkId. CreateCompositor(frame_sink_id); gfx::AcceleratedWidget accelerated_widget; if (IsUsingTestContext()) { @@ -93,6 +93,9 @@ // Mus windows are assumed hidden. compositor()->SetVisible(false); + + if (frame_sink_id.is_valid()) + window_mus->SetFrameSinkIdFromServer(frame_sink_id); } // Pass |properties| to CreateWindowPortForTopLevel() so that |properties| @@ -101,12 +104,14 @@ // properties may be server specific and not applied to the Window. 
WindowTreeHostMus::WindowTreeHostMus( WindowTreeClient* window_tree_client, + const cc::FrameSinkId& frame_sink_id, const std::map<std::string, std::vector<uint8_t>>* properties) : WindowTreeHostMus( static_cast<WindowTreeHostMusDelegate*>(window_tree_client) ->CreateWindowPortForTopLevel(properties), window_tree_client, display::Screen::GetScreen()->GetPrimaryDisplay().id(), + frame_sink_id, properties) {} WindowTreeHostMus::~WindowTreeHostMus() {
diff --git a/ui/aura/mus/window_tree_host_mus.h b/ui/aura/mus/window_tree_host_mus.h index 047ffde..c9faf58 100644 --- a/ui/aura/mus/window_tree_host_mus.h +++ b/ui/aura/mus/window_tree_host_mus.h
@@ -38,6 +38,7 @@ std::unique_ptr<WindowPortMus> window_port, WindowTreeClient* window_tree_client, int64_t display_id, + const cc::FrameSinkId& frame_sink_id = cc::FrameSinkId(), const std::map<std::string, std::vector<uint8_t>>* properties = nullptr); // This constructor is intended for creating top level windows in @@ -48,6 +49,7 @@ // TODO: this should take an unordered_map, see http://crbug.com/670515. explicit WindowTreeHostMus( WindowTreeClient* window_tree_client, + const cc::FrameSinkId& frame_sink_id = cc::FrameSinkId(), const std::map<std::string, std::vector<uint8_t>>* properties = nullptr); ~WindowTreeHostMus() override;
diff --git a/ui/aura/mus/window_tree_host_mus_unittest.cc b/ui/aura/mus/window_tree_host_mus_unittest.cc index ef119bf..041b1adc 100644 --- a/ui/aura/mus/window_tree_host_mus_unittest.cc +++ b/ui/aura/mus/window_tree_host_mus_unittest.cc
@@ -14,7 +14,8 @@ TEST_F(WindowTreeHostMusTest, UpdateClientArea) { std::unique_ptr<WindowTreeHostMus> window_tree_host_mus = - base::MakeUnique<WindowTreeHostMus>(window_tree_client_impl()); + base::MakeUnique<WindowTreeHostMus>(window_tree_client_impl(), + cc::FrameSinkId()); gfx::Insets new_insets(10, 11, 12, 13); window_tree_host_mus->SetClientArea(new_insets, std::vector<gfx::Rect>()); @@ -23,7 +24,8 @@ TEST_F(WindowTreeHostMusTest, SetHitTestMask) { std::unique_ptr<WindowTreeHostMus> window_tree_host_mus = - base::MakeUnique<WindowTreeHostMus>(window_tree_client_impl()); + base::MakeUnique<WindowTreeHostMus>(window_tree_client_impl(), + cc::FrameSinkId()); EXPECT_FALSE(window_tree()->last_hit_test_mask().has_value()); gfx::Rect mask(10, 10, 10, 10);
diff --git a/ui/aura/test/mus/window_tree_client_private.cc b/ui/aura/test/mus/window_tree_client_private.cc index 40f77eb0..5ab768a 100644 --- a/ui/aura/test/mus/window_tree_client_private.cc +++ b/ui/aura/test/mus/window_tree_client_private.cc
@@ -29,7 +29,8 @@ const int64_t display_id = 1; const Id focused_window_id = 0; tree_client_impl_->OnEmbedImpl(window_tree, 1, std::move(root_data), - display_id, focused_window_id, true); + display_id, focused_window_id, true, + cc::FrameSinkId(1, 1)); } WindowTreeHostMus* WindowTreeClientPrivate::CallWmNewDisplayAdded( @@ -47,8 +48,8 @@ const display::Display& display, ui::mojom::WindowDataPtr root_data, bool parent_drawn) { - return tree_client_impl_->WmNewDisplayAddedImpl(display, std::move(root_data), - parent_drawn); + return tree_client_impl_->WmNewDisplayAddedImpl( + display, std::move(root_data), parent_drawn, cc::FrameSinkId(1, 1)); } void WindowTreeClientPrivate::CallOnWindowInputEvent(
diff --git a/ui/views/mus/desktop_window_tree_host_mus.cc b/ui/views/mus/desktop_window_tree_host_mus.cc index acb70db..39d256e6 100644 --- a/ui/views/mus/desktop_window_tree_host_mus.cc +++ b/ui/views/mus/desktop_window_tree_host_mus.cc
@@ -185,8 +185,10 @@ DesktopWindowTreeHostMus::DesktopWindowTreeHostMus( internal::NativeWidgetDelegate* native_widget_delegate, DesktopNativeWidgetAura* desktop_native_widget_aura, + const cc::FrameSinkId& frame_sink_id, const std::map<std::string, std::vector<uint8_t>>* mus_properties) : aura::WindowTreeHostMus(MusClient::Get()->window_tree_client(), + frame_sink_id, mus_properties), native_widget_delegate_(native_widget_delegate), desktop_native_widget_aura_(desktop_native_widget_aura),
diff --git a/ui/views/mus/desktop_window_tree_host_mus.h b/ui/views/mus/desktop_window_tree_host_mus.h index 457748c1..30d2370 100644 --- a/ui/views/mus/desktop_window_tree_host_mus.h +++ b/ui/views/mus/desktop_window_tree_host_mus.h
@@ -35,6 +35,7 @@ DesktopWindowTreeHostMus( internal::NativeWidgetDelegate* native_widget_delegate, DesktopNativeWidgetAura* desktop_native_widget_aura, + const cc::FrameSinkId& frame_sink_id, const std::map<std::string, std::vector<uint8_t>>* mus_properties); ~DesktopWindowTreeHostMus() override;
diff --git a/ui/views/mus/mus_client.cc b/ui/views/mus/mus_client.cc index 055f695..a2c40a4a 100644 --- a/ui/views/mus/mus_client.cc +++ b/ui/views/mus/mus_client.cc
@@ -261,7 +261,7 @@ std::map<std::string, std::vector<uint8_t>> mus_properties = ConfigurePropertiesFromParams(init_params); return base::MakeUnique<DesktopWindowTreeHostMus>( - delegate, desktop_native_widget_aura, &mus_properties); + delegate, desktop_native_widget_aura, cc::FrameSinkId(), &mus_properties); } void MusClient::OnEmbed(
diff --git a/ui/views/test/native_widget_factory_aura_mus.cc b/ui/views/test/native_widget_factory_aura_mus.cc index faa7fd6..292e7a9 100644 --- a/ui/views/test/native_widget_factory_aura_mus.cc +++ b/ui/views/test/native_widget_factory_aura_mus.cc
@@ -32,7 +32,8 @@ MusClient::Get()->ConfigurePropertiesFromParams(init_params); desktop_native_widget_aura->SetDesktopWindowTreeHost( base::MakeUnique<DesktopWindowTreeHostMus>( - widget, desktop_native_widget_aura, &mus_properties)); + widget, desktop_native_widget_aura, cc::FrameSinkId(), + &mus_properties)); return desktop_native_widget_aura; }