Automated commit: libchrome r1193442 uprev
Merge with upstream commit 3b22940051ba1a1e4080ba1d00f52f3d1bede1c0
BUG=None
TEST=sudo emerge libchrome
Change-Id: I7c411455fbf83d90172fd81e40fdebfcf7a4de25
diff --git a/BASE_VER b/BASE_VER
index c0d4b80..6a66cc6 100644
--- a/BASE_VER
+++ b/BASE_VER
@@ -1 +1 @@
-1192953
+1193442
diff --git a/base/allocator/partition_alloc_features.cc b/base/allocator/partition_alloc_features.cc
index 9fd0399..c107962 100644
--- a/base/allocator/partition_alloc_features.cc
+++ b/base/allocator/partition_alloc_features.cc
@@ -4,11 +4,15 @@
#include "base/allocator/partition_alloc_features.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_root.h"
+#include "base/allocator/partition_allocator/thread_cache.h"
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/features.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#include "build/chromecast_buildflags.h"
#include "build/chromeos_buildflags.h"
@@ -355,5 +359,57 @@
&kEnableConfigurableThreadCacheMultiplier,
"ThreadCacheMultiplierForAndroid", 1.};
+constexpr partition_alloc::internal::base::TimeDelta ToPartitionAllocTimeDelta(
+ base::TimeDelta time_delta) {
+ return partition_alloc::internal::base::Microseconds(
+ time_delta.InMicroseconds());
+}
+
+constexpr base::TimeDelta FromPartitionAllocTimeDelta(
+ partition_alloc::internal::base::TimeDelta time_delta) {
+ return base::Microseconds(time_delta.InMicroseconds());
+}
+
+BASE_FEATURE(kEnableConfigurableThreadCachePurgeInterval,
+ "EnableConfigurableThreadCachePurgeInterval",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+
+const base::FeatureParam<base::TimeDelta> kThreadCacheMinPurgeInterval{
+ &kEnableConfigurableThreadCachePurgeInterval, "ThreadCacheMinPurgeInterval",
+ FromPartitionAllocTimeDelta(partition_alloc::kMinPurgeInterval)};
+
+const base::FeatureParam<base::TimeDelta> kThreadCacheMaxPurgeInterval{
+ &kEnableConfigurableThreadCachePurgeInterval, "ThreadCacheMaxPurgeInterval",
+ FromPartitionAllocTimeDelta(partition_alloc::kMaxPurgeInterval)};
+
+const base::FeatureParam<base::TimeDelta> kThreadCacheDefaultPurgeInterval{
+ &kEnableConfigurableThreadCachePurgeInterval,
+ "ThreadCacheDefaultPurgeInterval",
+ FromPartitionAllocTimeDelta(partition_alloc::kDefaultPurgeInterval)};
+
+const partition_alloc::internal::base::TimeDelta
+GetThreadCacheMinPurgeInterval() {
+ return ToPartitionAllocTimeDelta(kThreadCacheMinPurgeInterval.Get());
+}
+
+const partition_alloc::internal::base::TimeDelta
+GetThreadCacheMaxPurgeInterval() {
+ return ToPartitionAllocTimeDelta(kThreadCacheMaxPurgeInterval.Get());
+}
+
+const partition_alloc::internal::base::TimeDelta
+GetThreadCacheDefaultPurgeInterval() {
+ return ToPartitionAllocTimeDelta(kThreadCacheDefaultPurgeInterval.Get());
+}
+
+BASE_FEATURE(kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
+ "EnableConfigurableThreadCacheMinCachedMemoryForPurging",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+
+const base::FeatureParam<int> kThreadCacheMinCachedMemoryForPurgingBytes{
+ &kEnableConfigurableThreadCacheMinCachedMemoryForPurging,
+ "ThreadCacheMinCachedMemoryForPurgingBytes",
+ partition_alloc::kMinCachedMemoryForPurgingBytes};
+
} // namespace features
} // namespace base
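
For context: the new purge-interval knobs above are ordinary feature params, so they can be overridden through the usual field-trial machinery. A minimal sketch, assuming base::test::ScopedFeatureList and string-encoded TimeDelta params; the interval values are placeholders for illustration only, not values used by this change:

    #include "base/allocator/partition_alloc_features.h"
    #include "base/test/scoped_feature_list.h"

    // Sketch only, e.g. inside a test fixture's SetUp(): enable the feature
    // and tune two of the intervals. "5s" / "30s" are arbitrary example values.
    base::test::ScopedFeatureList feature_list;
    feature_list.InitAndEnableFeatureWithParameters(
        base::features::kEnableConfigurableThreadCachePurgeInterval,
        {{"ThreadCacheDefaultPurgeInterval", "5s"},
         {"ThreadCacheMaxPurgeInterval", "30s"}});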
diff --git a/base/allocator/partition_alloc_features.h b/base/allocator/partition_alloc_features.h
index f2ff569..0814df8 100644
--- a/base/allocator/partition_alloc_features.h
+++ b/base/allocator/partition_alloc_features.h
@@ -5,6 +5,7 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
#define BASE_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
+#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_root.h"
#include "base/base_export.h"
@@ -12,6 +13,7 @@
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "base/strings/string_piece.h"
+#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
@@ -204,6 +206,19 @@
extern const BASE_EXPORT base::FeatureParam<double>
kThreadCacheMultiplierForAndroid;
+BASE_EXPORT BASE_DECLARE_FEATURE(kEnableConfigurableThreadCachePurgeInterval);
+extern const partition_alloc::internal::base::TimeDelta
+GetThreadCacheMinPurgeInterval();
+extern const partition_alloc::internal::base::TimeDelta
+GetThreadCacheMaxPurgeInterval();
+extern const partition_alloc::internal::base::TimeDelta
+GetThreadCacheDefaultPurgeInterval();
+
+BASE_EXPORT BASE_DECLARE_FEATURE(
+ kEnableConfigurableThreadCacheMinCachedMemoryForPurging);
+extern const BASE_EXPORT base::FeatureParam<int>
+ kThreadCacheMinCachedMemoryForPurgingBytes;
+
} // namespace features
} // namespace base
diff --git a/base/allocator/partition_alloc_support.cc b/base/allocator/partition_alloc_support.cc
index ee9c08e9..775d061 100644
--- a/base/allocator/partition_alloc_support.cc
+++ b/base/allocator/partition_alloc_support.cc
@@ -1283,6 +1283,12 @@
// initialized later.
DCHECK(process_type != switches::kZygoteProcess);
+ partition_alloc::ThreadCacheRegistry::Instance().SetPurgingConfiguration(
+ base::features::GetThreadCacheMinPurgeInterval(),
+ base::features::GetThreadCacheMaxPurgeInterval(),
+ base::features::GetThreadCacheDefaultPurgeInterval(),
+ size_t(base::features::kThreadCacheMinCachedMemoryForPurgingBytes.Get()));
+
base::allocator::StartThreadCachePeriodicPurge();
if (base::FeatureList::IsEnabled(
diff --git a/base/allocator/partition_allocator/partition_root.h b/base/allocator/partition_allocator/partition_root.h
index d5c4e07..d4b2e6c 100644
--- a/base/allocator/partition_allocator/partition_root.h
+++ b/base/allocator/partition_allocator/partition_root.h
@@ -499,25 +499,6 @@
PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocNoHooks(size_t requested_size,
size_t slot_span_alignment)
PA_MALLOC_ALIGNED;
- // Deprecated compatibility method.
- // TODO(mikt): remove this once all third party usage is gone.
- PA_ALWAYS_INLINE PA_MALLOC_FN void* AllocWithFlags(unsigned int flags,
- size_t requested_size,
- const char* type_name)
- PA_MALLOC_ALIGNED {
- // These conditional branching should be optimized away.
- if (flags == (AllocFlags::kReturnNull)) {
- return AllocInline<AllocFlags::kReturnNull>(requested_size, type_name);
- } else if (flags == (AllocFlags::kZeroFill)) {
- return AllocInline<AllocFlags::kZeroFill>(requested_size, type_name);
- } else if (flags == (AllocFlags::kReturnNull | AllocFlags::kZeroFill)) {
- return AllocInline<AllocFlags::kReturnNull | AllocFlags::kZeroFill>(
- requested_size, type_name);
- } else {
- PA_CHECK(0);
- PA_NOTREACHED();
- }
- }
template <unsigned int flags = 0>
PA_NOINLINE void* Realloc(void* ptr,
@@ -536,15 +517,6 @@
const char* type_name) PA_MALLOC_ALIGNED {
return ReallocInline<AllocFlags::kReturnNull>(ptr, new_size, type_name);
}
- // Deprecated compatibility method.
- // TODO(mikt): remove this once all third party usage is gone.
- PA_NOINLINE void* ReallocWithFlags(unsigned int flags,
- void* ptr,
- size_t new_size,
- const char* type_name) PA_MALLOC_ALIGNED {
- PA_CHECK(flags == AllocFlags::kReturnNull);
- return ReallocInline<AllocFlags::kReturnNull>(ptr, new_size, type_name);
- }
template <unsigned int flags = 0>
PA_NOINLINE void Free(void* object);
@@ -1287,7 +1259,7 @@
if (PartitionAllocHooks::AreHooksEnabled()) {
// A valid |root| might not be available if this function is called from
- // |FreeWithFlagsInUnknownRoot| and not deducible if object originates from
+ // |FreeInUnknownRoot| and not deducible if object originates from
// an override hook.
// TODO(crbug.com/1137393): See if we can make the root available more
// reliably or even make this function non-static.
diff --git a/base/allocator/partition_allocator/thread_cache.cc b/base/allocator/partition_allocator/thread_cache.cc
index 2ceb669..bfeb1ef 100644
--- a/base/allocator/partition_allocator/thread_cache.cc
+++ b/base/allocator/partition_allocator/thread_cache.cc
@@ -14,6 +14,7 @@
#include "base/allocator/partition_allocator/partition_alloc_base/component_export.h"
#include "base/allocator/partition_allocator/partition_alloc_base/debug/debugging_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_base/immediate_crash.h"
+#include "base/allocator/partition_allocator/partition_alloc_base/time/time.h"
#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_config.h"
@@ -69,10 +70,6 @@
static bool g_thread_cache_key_created = false;
} // namespace
-constexpr internal::base::TimeDelta ThreadCacheRegistry::kMinPurgeInterval;
-constexpr internal::base::TimeDelta ThreadCacheRegistry::kMaxPurgeInterval;
-constexpr internal::base::TimeDelta ThreadCacheRegistry::kDefaultPurgeInterval;
-constexpr size_t ThreadCacheRegistry::kMinCachedMemoryForPurging;
uint8_t ThreadCache::global_limits_[ThreadCache::kBucketCount];
// Start with the normal size, not the maximum one.
@@ -244,12 +241,28 @@
}
}
+void ThreadCacheRegistry::SetPurgingConfiguration(
+ const internal::base::TimeDelta min_purge_interval,
+ const internal::base::TimeDelta max_purge_interval,
+ const internal::base::TimeDelta default_purge_interval,
+ size_t min_cached_memory_for_purging_bytes) {
+ PA_CHECK(min_purge_interval <= default_purge_interval);
+ PA_CHECK(default_purge_interval <= max_purge_interval);
+ min_purge_interval_ = min_purge_interval;
+ max_purge_interval_ = max_purge_interval;
+ default_purge_interval_ = default_purge_interval;
+ min_cached_memory_for_purging_bytes_ = min_cached_memory_for_purging_bytes;
+ is_purging_configured_ = true;
+}
+
void ThreadCacheRegistry::RunPeriodicPurge() {
if (!periodic_purge_is_initialized_) {
ThreadCache::EnsureThreadSpecificDataInitialized();
periodic_purge_is_initialized_ = true;
}
+ PA_CHECK(is_purging_configured_);
+
// Summing across all threads can be slow, but is necessary. Otherwise we rely
// on the assumption that the current thread is a good proxy for overall
// allocation activity. This is not the case for all process types.
@@ -284,15 +297,15 @@
// scheduled purge with a small enough interval. This is the case for instance
// of a renderer moving to foreground. To mitigate that, if the jump in cached
// memory is very large, make a greater leap to faster purging.
- if (cached_memory_approx > 10 * kMinCachedMemoryForPurging) {
+ if (cached_memory_approx > 10 * min_cached_memory_for_purging_bytes_) {
periodic_purge_next_interval_ =
- std::min(kDefaultPurgeInterval, periodic_purge_next_interval_ / 2);
- } else if (cached_memory_approx > 2 * kMinCachedMemoryForPurging) {
+ std::min(default_purge_interval_, periodic_purge_next_interval_ / 2);
+ } else if (cached_memory_approx > 2 * min_cached_memory_for_purging_bytes_) {
periodic_purge_next_interval_ =
- std::max(kMinPurgeInterval, periodic_purge_next_interval_ / 2);
- } else if (cached_memory_approx < kMinCachedMemoryForPurging) {
+ std::max(min_purge_interval_, periodic_purge_next_interval_ / 2);
+ } else if (cached_memory_approx < min_cached_memory_for_purging_bytes_) {
periodic_purge_next_interval_ =
- std::min(kMaxPurgeInterval, periodic_purge_next_interval_ * 2);
+ std::min(max_purge_interval_, periodic_purge_next_interval_ * 2);
}
// Make sure that the next interval is in the right bounds. Even though the
@@ -304,7 +317,7 @@
// background threads, but only ask them to purge their own cache at the next
// allocation).
periodic_purge_next_interval_ = std::clamp(
- periodic_purge_next_interval_, kMinPurgeInterval, kMaxPurgeInterval);
+ periodic_purge_next_interval_, min_purge_interval_, max_purge_interval_);
PurgeAll();
}
@@ -315,7 +328,7 @@
}
void ThreadCacheRegistry::ResetForTesting() {
- periodic_purge_next_interval_ = kDefaultPurgeInterval;
+ periodic_purge_next_interval_ = default_purge_interval_;
}
// static
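
For reference, the interval adaptation in RunPeriodicPurge() is behaviourally unchanged by the move from class constants to instance fields. A standalone paraphrase of that arithmetic, written against base::TimeDelta and the default bounds (1 s min, 2 s default, 1 min max, 500 KiB threshold) purely for illustration; the helper name is made up:

    #include <algorithm>
    #include <cstddef>
    #include "base/time/time.h"

    // Paraphrase of ThreadCacheRegistry::RunPeriodicPurge()'s interval update.
    base::TimeDelta NextPurgeInterval(base::TimeDelta current,
                                      size_t cached_bytes) {
      constexpr base::TimeDelta kMin = base::Seconds(1);
      constexpr base::TimeDelta kDefault = base::Seconds(2);
      constexpr base::TimeDelta kMax = base::Minutes(1);
      constexpr size_t kMinCachedBytes = 500 * 1024;

      if (cached_bytes > 10 * kMinCachedBytes) {
        // Very large jump in cached memory: leap back towards fast purging.
        current = std::min(kDefault, current / 2);
      } else if (cached_bytes > 2 * kMinCachedBytes) {
        current = std::max(kMin, current / 2);
      } else if (cached_bytes < kMinCachedBytes) {
        current = std::min(kMax, current * 2);
      }
      return std::clamp(current, kMin, kMax);
    }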
diff --git a/base/allocator/partition_allocator/thread_cache.h b/base/allocator/partition_allocator/thread_cache.h
index 588cc90..36fab33 100644
--- a/base/allocator/partition_allocator/thread_cache.h
+++ b/base/allocator/partition_allocator/thread_cache.h
@@ -89,6 +89,14 @@
"");
};
+constexpr internal::base::TimeDelta kMinPurgeInterval =
+ internal::base::Seconds(1);
+constexpr internal::base::TimeDelta kMaxPurgeInterval =
+ internal::base::Minutes(1);
+constexpr internal::base::TimeDelta kDefaultPurgeInterval =
+ 2 * kMinPurgeInterval;
+constexpr size_t kMinCachedMemoryForPurgingBytes = 500 * 1024;
+
// Global registry of all ThreadCache instances.
//
// This class cannot allocate in the (Un)registerThreadCache() functions, as
@@ -135,6 +143,26 @@
void SetThreadCacheMultiplier(float multiplier);
void SetLargestActiveBucketIndex(uint8_t largest_active_bucket_index);
+ // Controls the thread cache purging configuration.
+ void SetPurgingConfiguration(
+ const internal::base::TimeDelta min_purge_interval,
+ const internal::base::TimeDelta max_purge_interval,
+ const internal::base::TimeDelta default_purge_interval,
+ size_t min_cached_memory_for_purging_bytes);
+ internal::base::TimeDelta min_purge_interval() const {
+ return min_purge_interval_;
+ }
+ internal::base::TimeDelta max_purge_interval() const {
+ return max_purge_interval_;
+ }
+ internal::base::TimeDelta default_purge_interval() const {
+ return default_purge_interval_;
+ }
+ size_t min_cached_memory_for_purging_bytes() const {
+ return min_cached_memory_for_purging_bytes_;
+ }
+ bool is_purging_configured() const { return is_purging_configured_; }
+
static internal::Lock& GetLock() { return Instance().lock_; }
// Purges all thread caches *now*. This is completely thread-unsafe, and
// should only be called in a post-fork() handler.
@@ -142,14 +170,6 @@
void ResetForTesting();
- static constexpr internal::base::TimeDelta kMinPurgeInterval =
- internal::base::Seconds(1);
- static constexpr internal::base::TimeDelta kMaxPurgeInterval =
- internal::base::Minutes(1);
- static constexpr internal::base::TimeDelta kDefaultPurgeInterval =
- 2 * kMinPurgeInterval;
- static constexpr size_t kMinCachedMemoryForPurging = 500 * 1024;
-
private:
friend class tools::ThreadCacheInspector;
friend class tools::HeapDumper;
@@ -158,8 +178,12 @@
internal::Lock lock_;
ThreadCache* list_head_ PA_GUARDED_BY(GetLock()) = nullptr;
bool periodic_purge_is_initialized_ = false;
- internal::base::TimeDelta periodic_purge_next_interval_ =
- kDefaultPurgeInterval;
+ internal::base::TimeDelta min_purge_interval_;
+ internal::base::TimeDelta max_purge_interval_;
+ internal::base::TimeDelta default_purge_interval_;
+ size_t min_cached_memory_for_purging_bytes_ = 0u;
+ internal::base::TimeDelta periodic_purge_next_interval_;
+ bool is_purging_configured_ = false;
uint8_t largest_active_bucket_index_ = internal::BucketIndexLookup::GetIndex(
ThreadCacheLimits::kDefaultSizeThreshold);
diff --git a/base/allocator/partition_allocator/thread_cache_unittest.cc b/base/allocator/partition_allocator/thread_cache_unittest.cc
index b6aee77..7e83912 100644
--- a/base/allocator/partition_allocator/thread_cache_unittest.cc
+++ b/base/allocator/partition_allocator/thread_cache_unittest.cc
@@ -104,6 +104,9 @@
ThreadCacheRegistry::Instance().SetThreadCacheMultiplier(
ThreadCache::kDefaultMultiplier);
+ ThreadCacheRegistry::Instance().SetPurgingConfiguration(
+ kMinPurgeInterval, kMaxPurgeInterval, kDefaultPurgeInterval,
+ kMinCachedMemoryForPurgingBytes);
ThreadCache::SetLargestCachedSize(ThreadCache::kLargeSizeThreshold);
// Make sure that enough slot spans have been touched, otherwise cache fill
@@ -781,42 +784,41 @@
registry.GetPeriodicPurgeNextIntervalInMicroseconds());
};
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
+ EXPECT_EQ(NextInterval(), registry.default_purge_interval());
// Small amount of memory, the period gets longer.
auto* tcache = ThreadCache::Get();
ASSERT_LT(tcache->CachedMemory(),
- ThreadCacheRegistry::kMinCachedMemoryForPurging);
+ registry.min_cached_memory_for_purging_bytes());
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
+ EXPECT_EQ(NextInterval(), 2 * registry.default_purge_interval());
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);
+ EXPECT_EQ(NextInterval(), 4 * registry.default_purge_interval());
// Check that the purge interval is clamped at the maximum value.
- while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
+ while (NextInterval() < registry.max_purge_interval()) {
registry.RunPeriodicPurge();
}
registry.RunPeriodicPurge();
// Not enough memory to decrease the interval.
- FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging +
+ FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes() + 1);
+ registry.RunPeriodicPurge();
+ EXPECT_EQ(NextInterval(), registry.max_purge_interval());
+
+ FillThreadCacheWithMemory(2 * registry.min_cached_memory_for_purging_bytes() +
1);
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
-
- FillThreadCacheWithMemory(
- 2 * ThreadCacheRegistry::kMinCachedMemoryForPurging + 1);
- registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);
+ EXPECT_EQ(NextInterval(), registry.max_purge_interval() / 2);
// Enough memory, interval doesn't change.
- FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging);
+ FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes());
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval / 2);
+ EXPECT_EQ(NextInterval(), registry.max_purge_interval() / 2);
// No cached memory, increase the interval.
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
+ EXPECT_EQ(NextInterval(), registry.max_purge_interval());
// Cannot test the very large size with only one thread, this is tested below
// in the multiple threads test.
@@ -858,9 +860,10 @@
bucket_distribution_(bucket_distribution) {}
void ThreadMain() override {
- FillThreadCacheWithMemory(
- root_, 5 * ThreadCacheRegistry::kMinCachedMemoryForPurging,
- bucket_distribution_);
+ FillThreadCacheWithMemory(root_,
+ 5 * ThreadCacheRegistry::Instance()
+ .min_cached_memory_for_purging_bytes(),
+ bucket_distribution_);
allocations_done_.fetch_add(1, std::memory_order_release);
// This thread needs to be alive when the next periodic purge task runs.
@@ -885,28 +888,27 @@
return internal::base::Microseconds(
registry.GetPeriodicPurgeNextIntervalInMicroseconds());
};
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
+ EXPECT_EQ(NextInterval(), registry.default_purge_interval());
// Small amount of memory, the period gets longer.
auto* tcache = ThreadCache::Get();
ASSERT_LT(tcache->CachedMemory(),
- ThreadCacheRegistry::kMinCachedMemoryForPurging);
+ registry.min_cached_memory_for_purging_bytes());
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), 2 * ThreadCacheRegistry::kDefaultPurgeInterval);
+ EXPECT_EQ(NextInterval(), 2 * registry.default_purge_interval());
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), 4 * ThreadCacheRegistry::kDefaultPurgeInterval);
+ EXPECT_EQ(NextInterval(), 4 * registry.default_purge_interval());
// Check that the purge interval is clamped at the maximum value.
- while (NextInterval() < ThreadCacheRegistry::kMaxPurgeInterval) {
+ while (NextInterval() < registry.max_purge_interval()) {
registry.RunPeriodicPurge();
}
registry.RunPeriodicPurge();
// Not enough memory on this thread to decrease the interval.
- FillThreadCacheWithMemory(ThreadCacheRegistry::kMinCachedMemoryForPurging /
- 2);
+ FillThreadCacheWithMemory(registry.min_cached_memory_for_purging_bytes() / 2);
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kMaxPurgeInterval);
+ EXPECT_EQ(NextInterval(), registry.max_purge_interval());
std::atomic<int> allocations_done{0};
std::atomic<bool> can_finish{false};
@@ -926,7 +928,7 @@
// Many allocations on the other thread.
registry.RunPeriodicPurge();
- EXPECT_EQ(NextInterval(), ThreadCacheRegistry::kDefaultPurgeInterval);
+ EXPECT_EQ(NextInterval(), registry.default_purge_interval());
can_finish.store(true, std::memory_order_release);
internal::base::PlatformThreadForTesting::Join(thread_handle);
diff --git a/base/features.cc b/base/features.cc
index bffd3ca..784865e 100644
--- a/base/features.cc
+++ b/base/features.cc
@@ -44,7 +44,14 @@
// population to collect data.
BASE_FEATURE(kPartialLowEndModeOnMidRangeDevices,
"PartialLowEndModeOnMidRangeDevices",
- base::FEATURE_ENABLED_BY_DEFAULT);
+ base::FEATURE_DISABLED_BY_DEFAULT);
+
+// A parameter to exclude or not exclude LowEndBackgroundCleanup from
+// PartialLowEndModeOnMidRangeDevices. This is used to see how
+// LowEndBackgroundCleanup affects the total count of memory.gpu.privatefootprints.
+const FeatureParam<bool> kPartialLowEndModeExcludeLowEndBackgroundCleanup{
+ &kPartialLowEndModeOnMidRangeDevices, "exculde-low-end-background-cleanup",
+ false};
#endif // BUILDFLAG(IS_ANDROID)
diff --git a/base/system/sys_info.h b/base/system/sys_info.h
index 2e78264..a5cb70f 100644
--- a/base/system/sys_info.h
+++ b/base/system/sys_info.h
@@ -10,6 +10,7 @@
#include <map>
#include <string>
+#include <string_view>
#include "base/base_export.h"
#include "base/functional/callback_forward.h"
@@ -113,17 +114,54 @@
// Returns a descriptive string for the current machine model or an empty
// string if the machine model is unknown or an error occurred.
// e.g. "MacPro1,1" on Mac, "iPhone9,3" on iOS or "Nexus 5" on Android. Only
- // implemented on OS X, iOS, Android, Chrome OS and Windows. This returns an
+ // implemented on macOS, iOS, Android, Chrome OS and Windows. This returns an
// empty string on other platforms.
+ //
+ // For macOS, a useful reference of the resulting strings returned by this
+ // function and their corresponding hardware can be found at
+ // https://everymac.com/systems/by_capability/mac-specs-by-machine-model-machine-id.html
static std::string HardwareModelName();
+#if BUILDFLAG(IS_MAC)
+ struct HardwareModelNameSplit {
+ std::string category;
+ int model = 0;
+ int variant = 0;
+ };
+ // Hardware model names on the Mac are of the shape "Mac𝓍,𝓎" where the
+ // prefix is the general category, the 𝓍 is the model, and the 𝓎 is the
+ // variant. This function takes the hardware model name as returned by
+ // HardwareModelName() above, and returns it split into its constituent parts.
+ // Returns nullopt if the value cannot be parsed.
+ //
+ // /!\ WARNING
+ //
+ // This is NOT A USEFUL FUNCTION and SHOULD NOT BE USED. While the `model`
+ // value does inform as to what generation of hardware it is within the
+ // `category`, this is not useful in determining the capabilities of the
+ // hardware. Instead of using the `model` value, check the actual capabilities
+ // of the hardware to verify what it can do rather than relying on a hardware
+ // model name. In addition, while the `category` value used to have meaning
+ // and could be used to determine the type of hardware (e.g. desktop vs
+ // laptop), in 2022 Apple started using the generic category of "Mac", thus
+ // removing its usefulness when used alone. While the entire model string as
+ // returned by HardwareModelName() above can be useful for identifying a
+ // specific piece of equipment, splitting it apart is not useful.
+ //
+ // Do not add any further callers! When the aforementioned 2022-era hardware
+ // is the minimum requirement for Chromium, remove this function and adjust
+ // all callers appropriately.
+ static absl::optional<HardwareModelNameSplit> SplitHardwareModelNameDoNotUse(
+ std::string_view name);
+#endif
+
struct HardwareInfo {
std::string manufacturer;
std::string model;
};
// Returns via |callback| a struct containing descriptive UTF-8 strings for
// the current machine manufacturer and model, or empty strings if the
- // information is unknown or an error occurred. Implemented on Windows, OS X,
+ // information is unknown or an error occurred. Implemented on Windows, macOS,
// iOS, Linux, Chrome OS and Android.
static void GetHardwareInfo(base::OnceCallback<void(HardwareInfo)> callback);
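
As a usage illustration only (the comment above explicitly discourages new callers), a pre-2022 model string would split roughly as follows; "MacBookPro14,3" is an assumed example input, not something this change references:

    #include "base/system/sys_info.h"

    // Sketch only, macOS: "MacBookPro14,3" is expected to split into
    // category "MacBookPro", model 14, variant 3; newer "Mac14,2"-style
    // names yield the generic category "Mac".
    absl::optional<base::SysInfo::HardwareModelNameSplit> split =
        base::SysInfo::SplitHardwareModelNameDoNotUse("MacBookPro14,3");
    if (split.has_value()) {
      // split->category == "MacBookPro", split->model == 14, split->variant == 3
    }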
diff --git a/base/task/sequence_manager/sequence_manager_impl.cc b/base/task/sequence_manager/sequence_manager_impl.cc
index 3136039..b285bae 100644
--- a/base/task/sequence_manager/sequence_manager_impl.cc
+++ b/base/task/sequence_manager/sequence_manager_impl.cc
@@ -707,8 +707,7 @@
SelectTaskOption option) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
- if (auto priority =
- main_thread_only().selector.GetHighestPendingPriority(option)) {
+ if (main_thread_only().selector.GetHighestPendingPriority(option)) {
// If the selector has non-empty queues we trivially know there is immediate
// work to be done. However we may want to yield to native work if it is
// more important.
@@ -720,8 +719,7 @@
// do this always.
ReloadEmptyWorkQueues();
- if (auto priority =
- main_thread_only().selector.GetHighestPendingPriority(option)) {
+ if (main_thread_only().selector.GetHighestPendingPriority(option)) {
return WakeUp{};
}
diff --git a/base/test/repeating_test_future_unittest.cc b/base/test/repeating_test_future_unittest.cc
index 7ecb777..9c5eced 100644
--- a/base/test/repeating_test_future_unittest.cc
+++ b/base/test/repeating_test_future_unittest.cc
@@ -124,12 +124,12 @@
test::ScopedRunLoopTimeout timeout(FROM_HERE, Milliseconds(1));
// `ScopedRunLoopTimeout` will automatically fail the test when a timeout
- // happens, so we use EXPECT_FATAL_FAILURE to handle this failure.
- // EXPECT_FATAL_FAILURE only works on static objects.
+ // happens, so we use EXPECT_NONFATAL_FAILURE to handle this failure.
+ // EXPECT_NONFATAL_FAILURE only works on static objects.
static bool success;
static RepeatingTestFuture<std::string> future;
- EXPECT_FATAL_FAILURE({ success = future.Wait(); }, "timed out");
+ EXPECT_NONFATAL_FAILURE({ success = future.Wait(); }, "timed out");
EXPECT_FALSE(success);
}
diff --git a/base/test/run_until_unittest.cc b/base/test/run_until_unittest.cc
index e6eb201..5fc3510 100644
--- a/base/test/run_until_unittest.cc
+++ b/base/test/run_until_unittest.cc
@@ -112,8 +112,8 @@
// EXPECT_FATAL_FAILURE only works on static objects.
static bool success;
- EXPECT_FATAL_FAILURE({ success = RunUntil([]() { return false; }); },
- "timed out");
+ EXPECT_NONFATAL_FAILURE({ success = RunUntil([]() { return false; }); },
+ "timed out");
EXPECT_FALSE(success);
}
diff --git a/base/test/scoped_run_loop_timeout.cc b/base/test/scoped_run_loop_timeout.cc
index 1c0df98..ca6a7da 100644
--- a/base/test/scoped_run_loop_timeout.cc
+++ b/base/test/scoped_run_loop_timeout.cc
@@ -41,7 +41,18 @@
const Location& timeout_enabled_from_here,
RepeatingCallback<std::string()> on_timeout_log,
const Location& run_from_here) {
- GTEST_FAIL_AT(run_from_here.file_name(), run_from_here.line_number())
+ // Add a non-fatal failure to GTest result and cause the test to fail.
+ // A non-fatal failure is preferred over a fatal one because LUCI Analysis
+ // will select the fatal failure over the non-fatal one as the primary error
+ // message for the test. The RunLoop::Run() function is generally called by
+ // the test framework and generates similar error messages and stack traces,
+ // making it difficult to cluster the failures. Making the failure non-fatal
+ // will propagate the ASSERT fatal failures in the test body as the primary
+ // error message.
+ // Also note that the GTest fatal failure will not actually stop the test
+ // execution if not directly used in the test body. A non-fatal/fatal failure
+ // here makes no difference to the test running flow.
+ ADD_FAILURE_AT(run_from_here.file_name(), run_from_here.line_number())
<< TimeoutMessage(on_timeout_log, timeout_enabled_from_here);
}
diff --git a/base/test/scoped_run_loop_timeout_unittest.cc b/base/test/scoped_run_loop_timeout_unittest.cc
index cadd0bc..2ee6832 100644
--- a/base/test/scoped_run_loop_timeout_unittest.cc
+++ b/base/test/scoped_run_loop_timeout_unittest.cc
@@ -38,9 +38,9 @@
SequencedTaskRunner::GetCurrentDefault()->PostDelayedTask(
FROM_HERE, MakeExpectedRunClosure(FROM_HERE), kArbitraryTimeout);
- // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+ // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
static RunLoop& static_loop = run_loop;
- EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.");
+ EXPECT_NONFATAL_FAILURE(static_loop.Run(), "Run() timed out.");
}
TEST(ScopedRunLoopTimeoutTest, RunTasksUntilTimeout) {
@@ -56,9 +56,9 @@
SequencedTaskRunner::GetCurrentDefault()->PostDelayedTask(
FROM_HERE, MakeExpectedRunClosure(FROM_HERE), kArbitraryTimeout);
- // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+ // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
static RunLoop& static_loop = run_loop;
- EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.");
+ EXPECT_NONFATAL_FAILURE(static_loop.Run(), "Run() timed out.");
}
TEST(ScopedRunLoopTimeoutTest, TimesOutWithInheritedTimeoutValue) {
@@ -87,9 +87,9 @@
EXPECT_CALL(log_callback, Run).WillOnce(testing::Return(std::string()));
- // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+ // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
static RunLoop& static_loop = run_loop;
- EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.");
+ EXPECT_NONFATAL_FAILURE(static_loop.Run(), "Run() timed out.");
}
TEST(ScopedRunLoopTimeoutTest, RunTasksUntilTimeoutWithInheritedTimeoutValue) {
@@ -111,9 +111,9 @@
EXPECT_CALL(log_callback, Run).WillOnce(testing::Return(std::string()));
- // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+ // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
static RunLoop& static_loop = run_loop;
- EXPECT_FATAL_FAILURE(static_loop.Run(), "Run() timed out.");
+ EXPECT_NONFATAL_FAILURE(static_loop.Run(), "Run() timed out.");
}
namespace {
@@ -143,10 +143,10 @@
location, kArbitraryTimeout,
BindRepeating([]() -> std::string { return kErrorMessage; }));
- // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+ // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
static RunLoop& static_loop = run_loop;
- EXPECT_FATAL_FAILURE(static_loop.Run(),
- GetExpectedTimeoutMessage(location, kErrorMessage));
+ EXPECT_NONFATAL_FAILURE(static_loop.Run(),
+ GetExpectedTimeoutMessage(location, kErrorMessage));
}
TEST(ScopedRunLoopTimeoutTest, OnTimeoutLogWithNestedTimeouts) {
@@ -162,10 +162,10 @@
location, kArbitraryTimeout,
BindRepeating([]() -> std::string { return kErrorMessage; }));
- // EXPECT_FATAL_FAILURE() can only reference globals and statics.
+ // EXPECT_NONFATAL_FAILURE() can only reference globals and statics.
static RunLoop& static_loop = run_loop;
- EXPECT_FATAL_FAILURE(static_loop.Run(),
- GetExpectedTimeoutMessage(location, kErrorMessage));
+ EXPECT_NONFATAL_FAILURE(static_loop.Run(),
+ GetExpectedTimeoutMessage(location, kErrorMessage));
}
} // namespace base::test
diff --git a/base/test/task_environment_unittest.cc b/base/test/task_environment_unittest.cc
index a8ee403..e340e33 100644
--- a/base/test/task_environment_unittest.cc
+++ b/base/test/task_environment_unittest.cc
@@ -993,13 +993,13 @@
}
static auto& static_on_timeout_cb = run_timeout->on_timeout;
#if defined(__clang__) && defined(_MSC_VER)
- EXPECT_FATAL_FAILURE(
+ EXPECT_NONFATAL_FAILURE(
static_on_timeout_cb.Run(FROM_HERE),
"RunLoop::Run() timed out. Timeout set at "
// We don't test the line number but it would be present.
"TaskEnvironment@base\\test\\task_environment.cc:");
#else
- EXPECT_FATAL_FAILURE(
+ EXPECT_NONFATAL_FAILURE(
static_on_timeout_cb.Run(FROM_HERE),
"RunLoop::Run() timed out. Timeout set at "
// We don't test the line number but it would be present.
diff --git a/base/test/test_future_unittest.cc b/base/test/test_future_unittest.cc
index 0faff8c..f8991a9 100644
--- a/base/test/test_future_unittest.cc
+++ b/base/test/test_future_unittest.cc
@@ -99,7 +99,7 @@
static bool success;
static TestFuture<AnyType> future;
- EXPECT_FATAL_FAILURE({ success = future.Wait(); }, "timed out");
+ EXPECT_NONFATAL_FAILURE({ success = future.Wait(); }, "timed out");
EXPECT_FALSE(success);
}
diff --git a/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3.sql b/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3.sql
index 6bc3230..1100fa2 100644
--- a/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3.sql
+++ b/base/tracing/stdlib/chrome/scroll_jank/scroll_jank_v3.sql
@@ -391,65 +391,3 @@
FROM
chrome_janky_frames
GROUP BY scroll_id;
-
--- An "intermediate" view for computing `chrome_scroll_jank_v3_output` below.
---
--- @column trace_num_frames The number of frames in the trace.
--- @column trace_num_janky_frames The number of delayed/janky frames in the
--- trace.
--- @column vsync_interval The standard vsync interval.
--- @column scrolls A proto amalgamation of metrics per scroll
--- including the number of frames, number of
--- janky frames, percent of janky frames,
--- maximum presentation delay, and the causes
--- of jank (cause, sub-cause, delay).
-CREATE VIEW chrome_scroll_jank_v3_intermediate AS
-SELECT
- -- MAX does not matter for these aggregations, since the values are the
- -- same across rows.
- (SELECT COUNT(*) FROM chrome_janky_frame_info_with_delay)
- AS trace_num_frames,
- (SELECT COUNT(*) FROM chrome_janky_frames)
- AS trace_num_janky_frames,
- causes.vsync_interval,
- RepeatedField(
- ChromeScrollJankV3_Scroll(
- 'num_frames',
- frames.num_frames,
- 'num_janky_frames',
- frames.num_janky_frames,
- 'scroll_jank_percentage',
- frames.scroll_jank_percentage,
- 'max_delay_since_last_frame',
- causes.max_delay_since_last_frame,
- 'scroll_jank_causes',
- causes.scroll_jank_causes))
- AS scrolls
-FROM
- chrome_frames_per_scroll AS frames
-INNER JOIN chrome_causes_per_scroll AS causes
- ON frames.scroll_id = causes.scroll_id;
-
--- For producing a "native" Perfetto UI metric.
---
--- @column scroll_jank_summary A proto amalgamation summarizing all of the
--- scroll jank in a trace, including the number
--- of frames, janky frames, percentage of janky
--- frames, vsync interval, and a summary of this
--- data (including individual causes) for each
--- scroll.
-CREATE VIEW chrome_scroll_jank_v3_output AS
-SELECT
- ChromeScrollJankV3(
- 'trace_num_frames',
- trace_num_frames,
- 'trace_num_janky_frames',
- trace_num_janky_frames,
- 'trace_scroll_jank_percentage',
- 100.0 * trace_num_janky_frames / trace_num_frames,
- 'vsync_interval_ms',
- vsync_interval,
- 'scrolls',
- scrolls) AS scroll_jank_summary
-FROM
- chrome_scroll_jank_v3_intermediate;