Automated commit: libchrome r1211408 uprev
Merge with upstream commit 5496d0b2b59204271ee1d5c6600635a4542a8d35
BUG=None
TEST=sudo emerge libchrome
Change-Id: Ib25833899c4fcf87d4eb8cdd2b5931a122fe2ab2
diff --git a/BASE_VER b/BASE_VER
index e1b1070..63f5e27 100644
--- a/BASE_VER
+++ b/BASE_VER
@@ -1 +1 @@
-1209867
+1211408
diff --git a/base/allocator/allocator.gni b/base/allocator/allocator.gni
index 47b8c1e..02ef1f3 100644
--- a/base/allocator/allocator.gni
+++ b/base/allocator/allocator.gni
@@ -2,8 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import(
- "//base/allocator/partition_allocator/src/partition_alloc/partition_alloc.gni")
+import("//base/allocator/partition_allocator/partition_alloc.gni")
# Chromium-specific asserts. External embedders _may_ elect to use these
# features even without PA-E.
diff --git a/base/allocator/partition_alloc_features.cc b/base/allocator/partition_alloc_features.cc
index b3d88e5..6d8363b 100644
--- a/base/allocator/partition_alloc_features.cc
+++ b/base/allocator/partition_alloc_features.cc
@@ -329,7 +329,7 @@
FEATURE_DISABLED_BY_DEFAULT);
#endif
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// A parameter to exclude or not exclude PartitionAllocSupport from
// PartialLowModeOnMidRangeDevices. This is used to see how it affects
// renderer performance, e.g. the blink_perf.parser benchmark.
diff --git a/base/allocator/partition_alloc_features.h b/base/allocator/partition_alloc_features.h
index ea5035c..27ae62d 100644
--- a/base/allocator/partition_alloc_features.h
+++ b/base/allocator/partition_alloc_features.h
@@ -191,7 +191,7 @@
BASE_EXPORT BASE_DECLARE_FEATURE(kPageAllocatorRetryOnCommitFailure);
#endif
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
extern const base::FeatureParam<bool>
kPartialLowEndModeExcludePartitionAllocSupport;
#endif
diff --git a/base/allocator/partition_alloc_support.cc b/base/allocator/partition_alloc_support.cc
index e6425d2..aec8f7e 100644
--- a/base/allocator/partition_alloc_support.cc
+++ b/base/allocator/partition_alloc_support.cc
@@ -1303,7 +1303,7 @@
base::features::kThreadCacheMultiplier.Get());
#endif // BUILDFLAG(IS_ANDROID)
} else {
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// If kEnableConfigurableThreadCacheMultiplier is not enabled, lower
// thread cache limits on Android low-end devices to avoid stranding too much
// memory in the caches.
@@ -1313,7 +1313,7 @@
.SetThreadCacheMultiplier(
::partition_alloc::ThreadCache::kDefaultMultiplier / 2.);
}
-#endif // BUILDFLAG(IS_ANDROID)
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
}
// Renderer processes are more performance-sensitive, increase thread cache
diff --git a/base/allocator/partition_allocator/src/partition_alloc/PartitionAlloc.md b/base/allocator/partition_allocator/PartitionAlloc.md
similarity index 100%
rename from base/allocator/partition_allocator/src/partition_alloc/PartitionAlloc.md
rename to base/allocator/partition_allocator/PartitionAlloc.md
diff --git a/base/allocator/partition_allocator/src/partition_alloc/build_config.md b/base/allocator/partition_allocator/build_config.md
similarity index 96%
rename from base/allocator/partition_allocator/src/partition_alloc/build_config.md
rename to base/allocator/partition_allocator/build_config.md
index 064f53f..84951d5 100644
--- a/base/allocator/partition_allocator/src/partition_alloc/build_config.md
+++ b/base/allocator/partition_allocator/build_config.md
@@ -8,7 +8,7 @@
*** promo
Most of what you'll want to know exists between
-* [`//base/allocator/partition_allocator/src/partition_alloc/BUILD.gn`][pa-build-gn],
+* [`//base/allocator/partition_allocator/BUILD.gn`][pa-build-gn],
* Everything else ending in `.gn` or `.gni` in
`//base/allocator/partition_allocator/src/partition_alloc/`,
* [`allocator.gni`][allocator-gni],
@@ -93,7 +93,7 @@
[gn-declare-args]: https://gn.googlesource.com/gn/+/refs/heads/main/docs/reference.md#func_declare_args
[buildflag-header]: https://source.chromium.org/chromium/chromium/src/+/main:build/buildflag_header.gni
-[pa-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/src/partition_alloc/BUILD.gn
+[pa-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/partition_allocator/BUILD.gn
[allocator-gni]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/allocator.gni
[base-allocator-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/allocator/BUILD.gn
[base-build-gn]: https://source.chromium.org/chromium/chromium/src/+/main:base/BUILD.gn
diff --git a/base/allocator/partition_allocator/src/partition_alloc/build_overrides/build.gni b/base/allocator/partition_allocator/build_overrides/build.gni
similarity index 100%
rename from base/allocator/partition_allocator/src/partition_alloc/build_overrides/build.gni
rename to base/allocator/partition_allocator/build_overrides/build.gni
diff --git a/base/allocator/partition_allocator/src/partition_alloc/build_overrides/partition_alloc.gni b/base/allocator/partition_allocator/build_overrides/partition_alloc.gni
similarity index 100%
rename from base/allocator/partition_allocator/src/partition_alloc/build_overrides/partition_alloc.gni
rename to base/allocator/partition_allocator/build_overrides/partition_alloc.gni
diff --git a/base/allocator/partition_allocator/src/partition_alloc/external_builds.md b/base/allocator/partition_allocator/external_builds.md
similarity index 100%
rename from base/allocator/partition_allocator/src/partition_alloc/external_builds.md
rename to base/allocator/partition_allocator/external_builds.md
diff --git a/base/allocator/partition_allocator/src/partition_alloc/glossary.md b/base/allocator/partition_allocator/glossary.md
similarity index 100%
rename from base/allocator/partition_allocator/src/partition_alloc/glossary.md
rename to base/allocator/partition_allocator/glossary.md
diff --git a/base/allocator/partition_allocator/src/partition_alloc/partition_alloc.gni b/base/allocator/partition_allocator/partition_alloc.gni
similarity index 100%
rename from base/allocator/partition_allocator/src/partition_alloc/partition_alloc.gni
rename to base/allocator/partition_allocator/partition_alloc.gni
diff --git a/base/allocator/partition_allocator/src/partition_alloc/encoded_next_freelist.h b/base/allocator/partition_allocator/src/partition_alloc/encoded_next_freelist.h
index 0d7ffd4..89f5845 100644
--- a/base/allocator/partition_allocator/src/partition_alloc/encoded_next_freelist.h
+++ b/base/allocator/partition_allocator/src/partition_alloc/encoded_next_freelist.h
@@ -74,7 +74,7 @@
};
// Freelist entries are encoded for security reasons. See
-// //base/allocator/partition_allocator/src/partition_alloc/PartitionAlloc.md
+// //base/allocator/partition_allocator/PartitionAlloc.md
// and |Transform()| for the rationale and mechanism, respectively.
class EncodedNextFreelistEntry {
private:
diff --git a/base/android/java/src/org/chromium/base/ApiCompatibilityUtils.java b/base/android/java/src/org/chromium/base/ApiCompatibilityUtils.java
index 2bea896..753b8e4 100644
--- a/base/android/java/src/org/chromium/base/ApiCompatibilityUtils.java
+++ b/base/android/java/src/org/chromium/base/ApiCompatibilityUtils.java
@@ -29,12 +29,9 @@
import android.view.View;
import android.view.textclassifier.TextClassifier;
import android.widget.TextView;
-
import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
-import androidx.annotation.OptIn;
import androidx.annotation.RequiresApi;
-
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
@@ -289,7 +286,6 @@
* passed to Chrome from a backgrounded app.
* @param options {@ActivityOptions} to set the required mode to.
*/
- @OptIn(markerClass = androidx.core.os.BuildCompat.PrereleaseSdkCheck.class)
public static void setActivityOptionsBackgroundActivityStartMode(
@NonNull ActivityOptions options) {
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.UPSIDE_DOWN_CAKE) return;
@@ -302,7 +298,6 @@
* See https://crbug.com/1427112
* @param view The view on which to set the handwriting bounds.
*/
- @OptIn(markerClass = androidx.core.os.BuildCompat.PrereleaseSdkCheck.class)
public static void clearHandwritingBoundsOffsetBottom(View view) {
// TODO(crbug.com/1427112): Replace uses of this method with direct calls once the API is
// available.
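Usage sketch for the de-annotated helpers (illustrative; the wrapper class, context, and intent below are hypothetical): with the BuildCompat @OptIn markers gone, callers simply invoke the helper and rely on its internal SDK_INT gate.

    import android.app.ActivityOptions;
    import android.content.Context;
    import android.content.Intent;

    import org.chromium.base.ApiCompatibilityUtils;

    final class BackgroundStartExample {
        static void launch(Context context, Intent intent) {
            ActivityOptions options = ActivityOptions.makeBasic();
            // No-op below UPSIDE_DOWN_CAKE; the helper performs the version check itself.
            ApiCompatibilityUtils.setActivityOptionsBackgroundActivityStartMode(options);
            context.startActivity(intent, options.toBundle());
        }
    }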
diff --git a/base/android/java/src/org/chromium/base/BuildInfo.java b/base/android/java/src/org/chromium/base/BuildInfo.java
index 5f74a01..533a96d 100644
--- a/base/android/java/src/org/chromium/base/BuildInfo.java
+++ b/base/android/java/src/org/chromium/base/BuildInfo.java
@@ -18,11 +18,7 @@
import android.os.Process;
import android.text.TextUtils;
-import androidx.annotation.OptIn;
-import androidx.core.os.BuildCompat;
-
-import org.jni_zero.CalledByNative;
-
+import org.chromium.base.annotations.CalledByNative;
import org.chromium.base.compat.ApiHelperForP;
import org.chromium.build.BuildConfig;
@@ -82,6 +78,10 @@
public final boolean isTV;
/** Whether we're running on an Android Automotive OS device or not. */
public final boolean isAutomotive;
+
+ /** Whether we're running on an Android Foldable OS device or not. */
+ public final boolean isFoldable;
+
/**
* version of the FEATURE_VULKAN_DEQP_LEVEL, if available. Queried only on Android T or above
*/
@@ -97,42 +97,41 @@
}
/** Returns a serialized string array of all properties of this class. */
- @OptIn(markerClass = androidx.core.os.BuildCompat.PrereleaseSdkCheck.class)
private String[] getAllProperties() {
// This implementation needs to be kept in sync with the native BuildInfo constructor.
return new String[] {
- Build.BRAND,
- Build.DEVICE,
- Build.ID,
- Build.MANUFACTURER,
- Build.MODEL,
- String.valueOf(Build.VERSION.SDK_INT),
- Build.TYPE,
- Build.BOARD,
- hostPackageName,
- String.valueOf(hostVersionCode),
- hostPackageLabel,
- packageName,
- String.valueOf(versionCode),
- versionName,
- androidBuildFingerprint,
- gmsVersionCode,
- installerPackageName,
- abiString,
- customThemes,
- resourcesVersion,
- String.valueOf(
- ContextUtils.getApplicationContext().getApplicationInfo().targetSdkVersion),
- isDebugAndroid() ? "1" : "0",
- isTV ? "1" : "0",
- Build.VERSION.INCREMENTAL,
- Build.HARDWARE,
- isAtLeastT() ? "1" : "0",
- isAutomotive ? "1" : "0",
- BuildCompat.isAtLeastU() ? "1" : "0",
- targetsAtLeastU() ? "1" : "0",
- Build.VERSION.CODENAME,
- String.valueOf(vulkanDeqpLevel),
+ Build.BRAND,
+ Build.DEVICE,
+ Build.ID,
+ Build.MANUFACTURER,
+ Build.MODEL,
+ String.valueOf(Build.VERSION.SDK_INT),
+ Build.TYPE,
+ Build.BOARD,
+ hostPackageName,
+ String.valueOf(hostVersionCode),
+ hostPackageLabel,
+ packageName,
+ String.valueOf(versionCode),
+ versionName,
+ androidBuildFingerprint,
+ gmsVersionCode,
+ installerPackageName,
+ abiString,
+ customThemes,
+ resourcesVersion,
+ String.valueOf(
+ ContextUtils.getApplicationContext().getApplicationInfo().targetSdkVersion),
+ isDebugAndroid() ? "1" : "0",
+ isTV ? "1" : "0",
+ Build.VERSION.INCREMENTAL,
+ Build.HARDWARE,
+ Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU ? "1" : "0",
+ isAutomotive ? "1" : "0",
+ Build.VERSION.SDK_INT >= VERSION_CODES.UPSIDE_DOWN_CAKE ? "1" : "0",
+ targetsAtLeastU() ? "1" : "0",
+ Build.VERSION.CODENAME,
+ String.valueOf(vulkanDeqpLevel),
};
}
@@ -326,6 +325,11 @@
}
this.isAutomotive = isAutomotive;
+ // Detect whether device is foldable.
+ this.isFoldable =
+ Build.VERSION.SDK_INT >= VERSION_CODES.R
+ && pm.hasSystemFeature(PackageManager.FEATURE_SENSOR_HINGE_ANGLE);
+
int vulkanLevel = 0;
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU) {
FeatureInfo[] features = pm.getSystemAvailableFeatures();
@@ -367,17 +371,6 @@
}
/**
- * @deprecated For most callers, just replace with an inline check:
- * if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU)
- * For Robolectric just set the SDK level to VERSION_CODES.TIRAMISU
- */
- @Deprecated
- @OptIn(markerClass = androidx.core.os.BuildCompat.PrereleaseSdkCheck.class)
- public static boolean isAtLeastT() {
- return BuildCompat.isAtLeastT();
- }
-
- /**
* Checks if the application targets the T SDK or later.
* @deprecated Chrome callers should just remove this test - Chrome targets T or later now.
* WebView callers should just inline the logic below to check the target level of the embedding
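With BuildInfo.isAtLeastT() removed, its deprecation note points callers at an inline SDK check; a minimal sketch of that replacement (the wrapper class name is hypothetical):

    import android.os.Build;

    final class SdkLevelExample {
        // Equivalent of the removed BuildInfo.isAtLeastT(): T has shipped, so a plain
        // SDK_INT comparison suffices and needs no BuildCompat pre-release opt-in.
        static boolean isAtLeastT() {
            return Build.VERSION.SDK_INT >= Build.VERSION_CODES.TIRAMISU;
        }
    }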
diff --git a/base/android/java/src/org/chromium/base/IntentUtils.java b/base/android/java/src/org/chromium/base/IntentUtils.java
index 99cd4b0..24a3c2d 100644
--- a/base/android/java/src/org/chromium/base/IntentUtils.java
+++ b/base/android/java/src/org/chromium/base/IntentUtils.java
@@ -20,7 +20,6 @@
import android.text.TextUtils;
import androidx.annotation.Nullable;
-import androidx.core.app.BundleCompat;
import org.chromium.base.compat.ApiHelperForM;
import org.chromium.base.compat.ApiHelperForS;
@@ -343,12 +342,16 @@
}
/**
- * Just like {@link BundleCompat#getBinder()}, but doesn't throw exceptions.
+ * Returns the value associated with the given name, or null if no mapping of the desired type
+ * exists for the given name or a null value is explicitly associated with the name.
+ *
+ * @param name a key string
+ * @return an IBinder value, or null
*/
public static IBinder safeGetBinder(Bundle bundle, String name) {
if (bundle == null) return null;
try {
- return BundleCompat.getBinder(bundle, name);
+ return bundle.getBinder(name);
} catch (Throwable t) {
// Catches un-parceling exceptions.
Log.e(TAG, "getBinder failed on bundle " + bundle);
@@ -371,8 +374,6 @@
/**
* Inserts a {@link Binder} value into an Intent as an extra.
*
- * Uses {@link BundleCompat#putBinder()}, but doesn't throw exceptions.
- *
* @param intent Intent to put the binder into.
* @param name Key.
* @param binder Binder object.
@@ -381,7 +382,7 @@
if (intent == null) return;
Bundle bundle = new Bundle();
try {
- BundleCompat.putBinder(bundle, name, binder);
+ bundle.putBinder(name, binder);
} catch (Throwable t) {
// Catches parceling exceptions.
Log.e(TAG, "putBinder failed on bundle " + bundle);
diff --git a/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplier.java b/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplier.java
index 4951d5a..ce9ba8f 100644
--- a/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplier.java
+++ b/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplier.java
@@ -4,92 +4,34 @@
package org.chromium.base.supplier;
-import androidx.annotation.NonNull;
import androidx.annotation.Nullable;
import org.chromium.base.Callback;
-import org.chromium.base.Promise;
-import org.chromium.base.ThreadUtils;
/**
- * Abstract lazy implementation of {@link OneshotSupplier} to be used by classes owning a
- * OneshotSupplier and providing it as a dependency to others. This class differs from
- * {@link OneshotSupplierImpl} in that a call to {@link LazyOneshotSupplier#get()} will attempt to
- * set the supplied object via {@link LazyOneshotSupplier#doSet()}. Additionally,
- * {@link LazyOneshotSupplier#onAvailable(Callback<T>)} will not call
- * {@link LazyOneshotSupplier#get()} unless it already has a value to prevent eager initialization.
- *
- * If eager initialization in response to {@link LazyOneshotSupplier#onAvailable(Callback<T>)} is
- * required then a call to {@link LazyOneshotSupplier#get()} can be made just before attaching
- * the callback.
- *
- * <p>Instances of this class must only be accessed from the thread they were created on.
- *
- * To use:
- * <ol>
- * <li>Create a new {@code LazyOneshotSupplier<T>} to pass as a dependency.
- * <li>Override {@link #doSet()} to invoke {@link #set(T)}. This will be invoked when
- * {@link #get()} is invoked if {@link #hasValue()} returns false. Note that invoking
- * {@link #doSet()} does not have to invoke {@link #set(T)} if there is reason not to
- * such as awaiting an async dependency. However, if this is the case clients of the
- * supplier need to be careful to properly understand the initialization lifecycle.
- * </ol>
+ * Wraps a lazy-loaded nullable object, notifying observers a single time when the dependency
+ * becomes available. This intentionally doesn't extend {@link OneshotSupplier} to support the
+ * supplied value being null.
*
* @param <T> The type of the wrapped object.
*/
-public abstract class LazyOneshotSupplier<T> implements OneshotSupplier<T> {
- private final Promise<T> mPromise = new Promise<>();
- private final ThreadUtils.ThreadChecker mThreadChecker = new ThreadUtils.ThreadChecker();
-
+public interface LazyOneshotSupplier<T> {
/**
- * Lazily invokes the callback the first time {@link #set(T)} is
- * invoked or immediately if already available.
- */
- @Override
- public T onAvailable(Callback<T> callback) {
- mThreadChecker.assertOnValidThread();
- mPromise.then(callback);
- return hasValue() ? get() : null;
- }
-
- /**
- * Return the value of the supplier. Calling this the first time will initialize the
- * value in the supplier via {@link #doSet()}.
- * @return the value that was provided in {@link #set(T)} or null.
- */
- @Override
- public @Nullable T get() {
- mThreadChecker.assertOnValidThread();
- if (!hasValue()) {
- doSet();
- }
- return hasValue() ? mPromise.getResult() : null;
- }
-
- /**
- * Returns whether a value is set in the supplier.
- */
- @Override
- public boolean hasValue() {
- return mPromise.isFulfilled();
- }
-
- /**
- * Sets the value upon first {@link #get()}. Implementers should override this to invoke
- * {@link #set(T)}.
- */
- public abstract void doSet();
-
- /**
- * Set the object supplied by this supplier. This will notify registered callbacks that the
- * dependency is available. If set() has already been called, this method will assert.
+ * Add a callback that's called when the object owned by this supplier is available. If the
+ * object is already available, the callback will be called at the end of the current message
+ * loop.
*
- * @param object The object to supply.
+ * @param callback The callback to be called.
*/
- public void set(@NonNull T object) {
- mThreadChecker.assertOnValidThread();
- assert !mPromise.isFulfilled();
- assert object != null;
- mPromise.fulfill(object);
- }
+ void onAvailable(Callback<T> callback);
+
+ /**
+ * Returns the value currently held or <code>null</code> when none is held. Use {@link
+ * #hasValue} to tell if the value is intentionally null.
+ */
+ @Nullable
+ T get();
+
+ /** Returns whether the supplier holds a value currently. */
+ boolean hasValue();
}
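Consumer-side sketch of the new interface (the observing class and log tag are hypothetical): observers register without forcing initialization, and hasValue() distinguishes "set to null" from "not set yet".

    import android.util.Log;

    import org.chromium.base.supplier.LazyOneshotSupplier;

    final class SupplierConsumerExample {
        static void observe(LazyOneshotSupplier<String> supplier) {
            // Registering a callback alone does not force lazy initialization.
            supplier.onAvailable(value -> Log.i("Example", "got: " + value));
            if (supplier.hasValue()) {
                // hasValue() distinguishes "set to null" from "not set yet".
                Log.i("Example", "current: " + supplier.get());
            }
        }
    }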
diff --git a/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplierImpl.java b/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplierImpl.java
new file mode 100644
index 0000000..a2fab0f
--- /dev/null
+++ b/base/android/java/src/org/chromium/base/supplier/LazyOneshotSupplierImpl.java
@@ -0,0 +1,100 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import androidx.annotation.Nullable;
+
+import org.chromium.base.Callback;
+import org.chromium.base.Promise;
+import org.chromium.base.ThreadUtils;
+
+/**
+ * Abstract implementation of {@link LazySupplier} to be used by classes providing it as a
+ * dependency to others. A call to {@link LazyOneshotSupplier#get()} will attempt to set the
+ * supplied object via {@link LazyOneshotSupplier#doSet()}. Additionally, {@link
+ * LazyOneshotSupplier#onAvailable(Callback<T>)} will not call {@link LazyOneshotSupplier#get()}
+ * unless it already has a value, to prevent eager initialization. The supplied value can be null;
+ * {@link LazyOneshotSupplier#hasValue} should be used to distinguish set from unset states.
+ *
+ * <p>If eager initialization in response to {@link LazyOneshotSupplier#onAvailable(Callback<T>)} is
+ * required then a call to {@link LazyOneshotSupplier#get()} can be made just before attaching the
+ * callback.
+ *
+ * <p>Instances of this class must only be accessed from the thread they were created on.
+ *
+ * <p>To use:
+ *
+ * <ol>
+ * <li>Create a new {@code LazyOneshotSupplier<T>} to pass as a dependency.
+ * <li>Override {@link #doSet()} to invoke {@link #set(T)}. This will be invoked when {@link
+ * #get()} is invoked if {@link #hasValue()} returns false. Note that invoking {@link
+ * #doSet()} does not have to invoke {@link #set(T)} if there is reason not to such as
+ * awaiting an async dependency. However, if this is the case clients of the supplier need to
+ * be careful to properly understand the initialization lifecycle.
+ * </ol>
+ *
+ * @param <T> The type of the wrapped object.
+ */
+public abstract class LazyOneshotSupplierImpl<T> implements LazyOneshotSupplier<T> {
+ private final Promise<T> mPromise = new Promise<>();
+ private final ThreadUtils.ThreadChecker mThreadChecker = new ThreadUtils.ThreadChecker();
+
+ private boolean mDoSetCalled;
+
+ /**
+ * Lazily invokes the callback the first time {@link #set(T)} is invoked or immediately if
+ * already available.
+ */
+ @Override
+ public void onAvailable(Callback<T> callback) {
+ mThreadChecker.assertOnValidThread();
+ mPromise.then(callback);
+ }
+
+ /**
+ * Return the value of the supplier. Calling this the first time will initialize the value in
+ * the supplier via {@link #doSet()}.
+ *
+ * @return the value that was provided in {@link #set(T)} or null.
+ */
+ @Override
+ public @Nullable T get() {
+ mThreadChecker.assertOnValidThread();
+ if (!hasValue()) {
+ tryDoSet();
+ }
+ return hasValue() ? mPromise.getResult() : null;
+ }
+
+ /** Returns whether a value is set in the supplier. */
+ @Override
+ public boolean hasValue() {
+ return mPromise.isFulfilled();
+ }
+
+ /**
+ * Sets the value upon first {@link #get()}. Implementers should override this to invoke {@link
+ * #set(T)}.
+ */
+ public abstract void doSet();
+
+ /**
+ * Set the object supplied by this supplier. This will notify registered callbacks that the
+ * dependency is available. If set() has already been called, this method will assert.
+ *
+ * @param object The object to supply.
+ */
+ public void set(@Nullable T object) {
+ mThreadChecker.assertOnValidThread();
+ assert !mPromise.isFulfilled();
+ mPromise.fulfill(object);
+ }
+
+ private void tryDoSet() {
+ if (mDoSetCalled) return;
+ doSet();
+ mDoSetCalled = true;
+ }
+}
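Provider-side sketch of the "To use" steps above, assuming the value can be computed synchronously inside doSet() (the provider class and computeTitle() are hypothetical):

    import org.chromium.base.supplier.LazyOneshotSupplier;
    import org.chromium.base.supplier.LazyOneshotSupplierImpl;

    final class TitleProvider {
        // doSet() runs on the first get(); onAvailable() by itself never triggers it.
        private final LazyOneshotSupplierImpl<String> mTitle =
                new LazyOneshotSupplierImpl<>() {
                    @Override
                    public void doSet() {
                        set(computeTitle()); // set(null) is also legal here.
                    }
                };

        LazyOneshotSupplier<String> getTitleSupplier() {
            return mTitle;
        }

        private String computeTitle() {
            return "Example title";
        }
    }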
diff --git a/base/android/jni_generator/jni_generator_helper.h b/base/android/jni_generator/jni_generator_helper.h
deleted file mode 100644
index a4f9f14..0000000
--- a/base/android/jni_generator/jni_generator_helper.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2014 The Chromium Authors
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ANDROID_JNI_GENERATOR_JNI_GENERATOR_HELPER_H_
-#define BASE_ANDROID_JNI_GENERATOR_JNI_GENERATOR_HELPER_H_
-
-#include <jni.h>
-
-#include "base/android/jni_android.h"
-#include "base/android/scoped_java_ref.h"
-#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/memory/raw_ptr.h"
-#include "build/build_config.h"
-#include "third_party/jni_zero/jni_int_wrapper.h"
-
-// Project-specific macros used by the header files generated by
-// jni_generator.py. Different projects can then specify their own
-// implementation for this file.
-#define CHECK_NATIVE_PTR(env, jcaller, native_ptr, method_name, ...) \
- DCHECK(native_ptr) << method_name;
-
-#define CHECK_CLAZZ(env, jcaller, clazz, ...) DCHECK(clazz);
-
-#if defined(ARCH_CPU_X86)
-// Dalvik JIT generated code doesn't guarantee 16-byte stack alignment on
-// x86 - use force_align_arg_pointer to realign the stack at the JNI
-// boundary. crbug.com/655248
-#define JNI_GENERATOR_EXPORT \
- extern "C" __attribute__((visibility("default"), force_align_arg_pointer))
-#else
-#define JNI_GENERATOR_EXPORT extern "C" __attribute__((visibility("default")))
-#endif
-
-// Used to export JNI registration functions.
-#if defined(COMPONENT_BUILD)
-#define JNI_REGISTRATION_EXPORT __attribute__((visibility("default")))
-#else
-#define JNI_REGISTRATION_EXPORT
-#endif
-
-namespace jni_generator {
-
-inline void HandleRegistrationError(JNIEnv* env,
- jclass clazz,
- const char* filename) {
- LOG(ERROR) << "RegisterNatives failed in " << filename;
-}
-
-inline void CheckException(JNIEnv* env) {
- base::android::CheckException(env);
-}
-
-// A 32 bit number could be an address on stack. Random 64 bit marker on the
-// stack is much less likely to be present on stack.
-constexpr uint64_t kJniStackMarkerValue = 0xbdbdef1bebcade1b;
-
-// Context about the JNI call with exception checked to be stored in stack.
-struct BASE_EXPORT JniJavaCallContextUnchecked {
- ALWAYS_INLINE JniJavaCallContextUnchecked() {
-// TODO(ssid): Implement for other architectures.
-#if defined(__arm__) || defined(__aarch64__)
- // This assumes that this method does not increment the stack pointer.
- asm volatile("mov %0, sp" : "=r"(sp));
-#else
- sp = 0;
-#endif
- }
-
- // Force no inline to reduce code size.
- template <base::android::MethodID::Type type>
- NOINLINE void Init(JNIEnv* env,
- jclass clazz,
- const char* method_name,
- const char* jni_signature,
- std::atomic<jmethodID>* atomic_method_id) {
- env1 = env;
-
- // Make sure compiler doesn't optimize out the assignment.
- memcpy(&marker, &kJniStackMarkerValue, sizeof(kJniStackMarkerValue));
- // Gets PC of the calling function.
- pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
-
- method_id = base::android::MethodID::LazyGet<type>(
- env, clazz, method_name, jni_signature, atomic_method_id);
- }
-
- NOINLINE ~JniJavaCallContextUnchecked() {
- // Reset so that spurious marker finds are avoided.
- memset(&marker, 0, sizeof(marker));
- }
-
- uint64_t marker;
- uintptr_t sp;
- uintptr_t pc;
-
- raw_ptr<JNIEnv> env1;
- jmethodID method_id;
-};
-
-// Context about the JNI call with exception unchecked to be stored in stack.
-struct BASE_EXPORT JniJavaCallContextChecked {
- // Force no inline to reduce code size.
- template <base::android::MethodID::Type type>
- NOINLINE void Init(JNIEnv* env,
- jclass clazz,
- const char* method_name,
- const char* jni_signature,
- std::atomic<jmethodID>* atomic_method_id) {
- base.Init<type>(env, clazz, method_name, jni_signature, atomic_method_id);
- // Reset |pc| to correct caller.
- base.pc = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
- }
-
- NOINLINE ~JniJavaCallContextChecked() {
- jni_generator::CheckException(base.env1);
- }
-
- JniJavaCallContextUnchecked base;
-};
-
-static_assert(sizeof(JniJavaCallContextChecked) ==
- sizeof(JniJavaCallContextUnchecked),
- "Stack unwinder cannot work with structs of different sizes.");
-
-} // namespace jni_generator
-
-#endif // BASE_ANDROID_JNI_GENERATOR_JNI_GENERATOR_HELPER_H_
diff --git a/base/android/junit/src/org/chromium/base/supplier/LazyOneshotSupplierImplTest.java b/base/android/junit/src/org/chromium/base/supplier/LazyOneshotSupplierImplTest.java
new file mode 100644
index 0000000..b8fdfd6
--- /dev/null
+++ b/base/android/junit/src/org/chromium/base/supplier/LazyOneshotSupplierImplTest.java
@@ -0,0 +1,72 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package org.chromium.base.supplier;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.MockitoAnnotations;
+import org.mockito.Spy;
+import org.robolectric.annotation.Config;
+import org.robolectric.annotation.LooperMode;
+import org.robolectric.shadows.ShadowProcess;
+
+import org.chromium.base.Callback;
+import org.chromium.base.test.BaseRobolectricTestRunner;
+
+/** Unit tests for {@link LazyOneshotSupplierImpl}. */
+@RunWith(BaseRobolectricTestRunner.class)
+@Config(shadows = {ShadowProcess.class})
+@LooperMode(LooperMode.Mode.LEGACY)
+public class LazyOneshotSupplierImplTest {
+ @Spy
+ private LazyOneshotSupplierImpl<String> mSupplier =
+ new LazyOneshotSupplierImpl<>() {
+ @Override
+ public void doSet() {
+ set("answer");
+ }
+ };
+
+ @Spy private Callback<String> mCallback1;
+ @Spy private Callback<String> mCallback2;
+
+ @Before
+ public void setup() {
+ MockitoAnnotations.initMocks(this);
+ }
+
+ @Test
+ public void testSetBeforeDoSet() {
+ assertFalse(mSupplier.hasValue());
+ mSupplier.set("answer");
+
+ mSupplier.onAvailable(mCallback1);
+ mSupplier.onAvailable(mCallback2);
+
+ assertTrue(mSupplier.hasValue());
+ verify(mCallback1).onResult("answer");
+ verify(mCallback2).onResult("answer");
+ verify(mSupplier, times(0)).doSet();
+ }
+
+ @Test
+ public void testDoSetCalledOnce() {
+ mSupplier.onAvailable(mCallback1);
+ verify(mSupplier, times(0)).doSet();
+
+ assertEquals("answer", mSupplier.get());
+ assertEquals("answer", mSupplier.get());
+
+ verify(mCallback1).onResult("answer");
+ verify(mSupplier).doSet();
+ }
+}
diff --git a/base/android/scoped_java_ref.h b/base/android/scoped_java_ref.h
index 3245d06..326e012 100644
--- a/base/android/scoped_java_ref.h
+++ b/base/android/scoped_java_ref.h
@@ -125,7 +125,7 @@
// template parameter.
template <typename ElementType,
typename T_ = T,
- typename = std::enable_if_t<std::is_same<T_, jobjectArray>::value>>
+ typename = std::enable_if_t<std::is_same_v<T_, jobjectArray>>>
JavaObjectArrayReader<ElementType> ReadElements() const {
return JavaObjectArrayReader<ElementType>(*this);
}
@@ -192,7 +192,7 @@
// Copy conversion constructor.
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaLocalRef(const ScopedJavaLocalRef<U>& other) : env_(other.env_) {
JavaRef<T>::SetNewLocalRef(env_, other.obj());
}
@@ -205,7 +205,7 @@
// Move conversion constructor.
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaLocalRef(ScopedJavaLocalRef<U>&& other) : env_(other.env_) {
JavaRef<T>::steal(std::move(other));
}
@@ -235,7 +235,7 @@
// Copy conversion assignment.
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaLocalRef& operator=(const ScopedJavaLocalRef<U>& other) {
Reset(other);
return *this;
@@ -243,7 +243,7 @@
// Move assignment.
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaLocalRef& operator=(ScopedJavaLocalRef<U>&& other) {
env_ = other.env_;
Reset();
@@ -260,7 +260,7 @@
void Reset() { JavaRef<T>::ResetLocalRef(env_); }
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
void Reset(const ScopedJavaLocalRef<U>& other) {
// We can copy over env_ here as |other| instance must be from the same
// thread as |this| local ref. (See class comment for multi-threading
@@ -316,7 +316,7 @@
// Copy conversion constructor.
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaGlobalRef(const ScopedJavaGlobalRef<U>& other) {
Reset(other);
}
@@ -329,7 +329,7 @@
// Move conversion constructor.
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaGlobalRef(ScopedJavaGlobalRef<U>&& other) {
JavaRef<T>::steal(std::move(other));
}
@@ -357,7 +357,7 @@
// Copy conversion assignment.
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaGlobalRef& operator=(const ScopedJavaGlobalRef<U>& other) {
Reset(other);
return *this;
@@ -365,7 +365,7 @@
// Move assignment.
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
ScopedJavaGlobalRef& operator=(ScopedJavaGlobalRef<U>&& other) {
Reset();
JavaRef<T>::steal(std::move(other));
@@ -381,7 +381,7 @@
void Reset() { JavaRef<T>::ResetGlobalRef(); }
template <typename U,
- typename = std::enable_if_t<std::is_convertible<U, T>::value>>
+ typename = std::enable_if_t<std::is_convertible_v<U, T>>>
void Reset(const ScopedJavaGlobalRef<U>& other) {
Reset(nullptr, other.obj());
}
diff --git a/base/android/scoped_java_ref_unittest.cc b/base/android/scoped_java_ref_unittest.cc
index b50a5d9..ec41ffa 100644
--- a/base/android/scoped_java_ref_unittest.cc
+++ b/base/android/scoped_java_ref_unittest.cc
@@ -284,18 +284,18 @@
JavaObjectArrayReader<jobject> reader(array_);
It i = reader.begin();
- EXPECT_TRUE(std::is_copy_constructible<It>::value);
+ EXPECT_TRUE(std::is_copy_constructible_v<It>);
It copy = i;
EXPECT_EQ(copy, i);
EXPECT_EQ(It(i), i);
- EXPECT_TRUE(std::is_copy_assignable<It>::value);
+ EXPECT_TRUE(std::is_copy_assignable_v<It>);
It assign = reader.end();
It& assign2 = (assign = i);
EXPECT_EQ(assign, i);
EXPECT_EQ(assign2, assign);
- EXPECT_TRUE(std::is_destructible<It>::value);
+ EXPECT_TRUE(std::is_destructible_v<It>);
// Swappable
It left = reader.begin(), right = reader.end();
diff --git a/base/apple/scoped_nsobject.h b/base/apple/scoped_nsobject.h
index 2276bfb..10d25b6 100644
--- a/base/apple/scoped_nsobject.h
+++ b/base/apple/scoped_nsobject.h
@@ -86,7 +86,7 @@
public:
using scoped_nsprotocol<NST*>::scoped_nsprotocol;
- static_assert(std::is_same<NST, NSAutoreleasePool>::value == false,
+ static_assert(std::is_same_v<NST, NSAutoreleasePool> == false,
"Use @autoreleasepool instead");
};
diff --git a/base/containers/buffer_iterator.h b/base/containers/buffer_iterator.h
index f8a0073..9fdcaba 100644
--- a/base/containers/buffer_iterator.h
+++ b/base/containers/buffer_iterator.h
@@ -58,8 +58,8 @@
template <typename B>
class BufferIterator {
public:
- static_assert(std::is_same<std::remove_const_t<B>, char>::value ||
- std::is_same<std::remove_const_t<B>, unsigned char>::value,
+ static_assert(std::is_same_v<std::remove_const_t<B>, char> ||
+ std::is_same_v<std::remove_const_t<B>, unsigned char>,
"Underlying buffer type must be char-type.");
BufferIterator() {}
diff --git a/base/containers/buffer_iterator_unittest.nc b/base/containers/buffer_iterator_unittest.nc
index ca1f8f3..c99c50e 100644
--- a/base/containers/buffer_iterator_unittest.nc
+++ b/base/containers/buffer_iterator_unittest.nc
@@ -21,7 +21,7 @@
std::string string_;
};
-#if defined(NCTEST_BUFFER_ITERATOR_CREATE_TYPE_UINT16) // [r"fatal error: static_assert failed due to requirement 'std::is_same<unsigned short, char>::value || std::is_same<unsigned short, unsigned char>::value': Underlying buffer type must be char-type."]
+#if defined(NCTEST_BUFFER_ITERATOR_CREATE_TYPE_UINT16) // [r"fatal error: static_assert failed due to requirement 'std::is_same_v<unsigned short, char> || std::is_same_v<unsigned short, unsigned char>': Underlying buffer type must be char-type."]
void WontCompile() {
constexpr size_t size = 64;
diff --git a/base/containers/checked_iterators.h b/base/containers/checked_iterators.h
index bed8ac6..0c24014 100644
--- a/base/containers/checked_iterators.h
+++ b/base/containers/checked_iterators.h
@@ -58,7 +58,7 @@
// See https://wg21.link/n4042 for details.
template <
typename U,
- std::enable_if_t<std::is_convertible<U (*)[], T (*)[]>::value>* = nullptr>
+ std::enable_if_t<std::is_convertible_v<U (*)[], T (*)[]>>* = nullptr>
constexpr CheckedContiguousIterator(const CheckedContiguousIterator<U>& other)
: start_(other.start_), current_(other.current_), end_(other.end_) {
// We explicitly don't delegate to the 3-argument constructor here. Its
diff --git a/base/containers/enum_set.h b/base/containers/enum_set.h
index 79ed4df..41070d6 100644
--- a/base/containers/enum_set.h
+++ b/base/containers/enum_set.h
@@ -48,7 +48,7 @@
class EnumSet {
private:
static_assert(
- std::is_enum<E>::value,
+ std::is_enum_v<E>,
"First template parameter of EnumSet must be an enumeration type");
using enum_underlying_type = std::underlying_type_t<E>;
diff --git a/base/containers/extend_unittest.cc b/base/containers/extend_unittest.cc
index 2e005fb..d1189dc 100644
--- a/base/containers/extend_unittest.cc
+++ b/base/containers/extend_unittest.cc
@@ -29,8 +29,8 @@
return a.c_ == b.c_;
}
-static_assert(std::is_move_constructible<NonCopyable>::value, "");
-static_assert(!std::is_copy_constructible<NonCopyable>::value, "");
+static_assert(std::is_move_constructible_v<NonCopyable>, "");
+static_assert(!std::is_copy_constructible_v<NonCopyable>, "");
struct CopyableMovable {
bool copied_;
diff --git a/base/containers/flat_map.h b/base/containers/flat_map.h
index 618b50d..dc5ec9f 100644
--- a/base/containers/flat_map.h
+++ b/base/containers/flat_map.h
@@ -235,12 +235,12 @@
iterator insert_or_assign(const_iterator hint, K&& key, M&& obj);
template <class K, class... Args>
- std::enable_if_t<std::is_constructible<key_type, K&&>::value,
+ std::enable_if_t<std::is_constructible_v<key_type, K&&>,
std::pair<iterator, bool>>
try_emplace(K&& key, Args&&... args);
template <class K, class... Args>
- std::enable_if_t<std::is_constructible<key_type, K&&>::value, iterator>
+ std::enable_if_t<std::is_constructible_v<key_type, K&&>, iterator>
try_emplace(const_iterator hint, K&& key, Args&&... args);
// --------------------------------------------------------------------------
@@ -324,7 +324,7 @@
template <class K, class... Args>
auto flat_map<Key, Mapped, Compare, Container>::try_emplace(K&& key,
Args&&... args)
- -> std::enable_if_t<std::is_constructible<key_type, K&&>::value,
+ -> std::enable_if_t<std::is_constructible_v<key_type, K&&>,
std::pair<iterator, bool>> {
return tree::emplace_key_args(
key, std::piecewise_construct,
@@ -337,7 +337,7 @@
auto flat_map<Key, Mapped, Compare, Container>::try_emplace(const_iterator hint,
K&& key,
Args&&... args)
- -> std::enable_if_t<std::is_constructible<key_type, K&&>::value, iterator> {
+ -> std::enable_if_t<std::is_constructible_v<key_type, K&&>, iterator> {
return tree::emplace_hint_key_args(
hint, key, std::piecewise_construct,
std::forward_as_tuple(std::forward<K>(key)),
diff --git a/base/containers/flat_tree.h b/base/containers/flat_tree.h
index abb02d5..7a8e605 100644
--- a/base/containers/flat_tree.h
+++ b/base/containers/flat_tree.h
@@ -1081,7 +1081,7 @@
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::lower_bound(
const K& key) const -> const_iterator {
- static_assert(std::is_convertible<const KeyTypeOrK<K>&, const K&>::value,
+ static_assert(std::is_convertible_v<const KeyTypeOrK<K>&, const K&>,
"Requested type cannot be bound to the container's key_type "
"which is required for a non-transparent compare.");
@@ -1115,7 +1115,7 @@
template <typename K>
auto flat_tree<Key, GetKeyFromValue, KeyCompare, Container>::upper_bound(
const K& key) const -> const_iterator {
- static_assert(std::is_convertible<const KeyTypeOrK<K>&, const K&>::value,
+ static_assert(std::is_convertible_v<const KeyTypeOrK<K>&, const K&>,
"Requested type cannot be bound to the container's key_type "
"which is required for a non-transparent compare.");
diff --git a/base/containers/flat_tree_unittest.cc b/base/containers/flat_tree_unittest.cc
index 00b6f50..a63fb74 100644
--- a/base/containers/flat_tree_unittest.cc
+++ b/base/containers/flat_tree_unittest.cc
@@ -211,14 +211,14 @@
using MoveThrowsTree = flat_tree<MoveThrows, base::identity, std::less<>,
std::array<MoveThrows, 1>>;
- static_assert(std::is_nothrow_move_constructible<IntTree>::value,
+ static_assert(std::is_nothrow_move_constructible_v<IntTree>,
"Error: IntTree is not nothrow move constructible");
- static_assert(std::is_nothrow_move_assignable<IntTree>::value,
+ static_assert(std::is_nothrow_move_assignable_v<IntTree>,
"Error: IntTree is not nothrow move assignable");
- static_assert(!std::is_nothrow_move_constructible<MoveThrowsTree>::value,
+ static_assert(!std::is_nothrow_move_constructible_v<MoveThrowsTree>,
"Error: MoveThrowsTree is nothrow move constructible");
- static_assert(!std::is_nothrow_move_assignable<MoveThrowsTree>::value,
+ static_assert(!std::is_nothrow_move_assignable_v<MoveThrowsTree>,
"Error: MoveThrowsTree is nothrow move assignable");
}
@@ -291,14 +291,13 @@
TEST(FlatTree, Types) {
// These are guaranteed to be portable.
- static_assert((std::is_same<int, IntTree::key_type>::value), "");
- static_assert((std::is_same<int, IntTree::value_type>::value), "");
- static_assert((std::is_same<std::less<>, IntTree::key_compare>::value), "");
- static_assert((std::is_same<int&, IntTree::reference>::value), "");
- static_assert((std::is_same<const int&, IntTree::const_reference>::value),
- "");
- static_assert((std::is_same<int*, IntTree::pointer>::value), "");
- static_assert((std::is_same<const int*, IntTree::const_pointer>::value), "");
+ static_assert((std::is_same_v<int, IntTree::key_type>), "");
+ static_assert((std::is_same_v<int, IntTree::value_type>), "");
+ static_assert((std::is_same_v<std::less<>, IntTree::key_compare>), "");
+ static_assert((std::is_same_v<int&, IntTree::reference>), "");
+ static_assert((std::is_same_v<const int&, IntTree::const_reference>), "");
+ static_assert((std::is_same_v<int*, IntTree::pointer>), "");
+ static_assert((std::is_same_v<const int*, IntTree::const_pointer>), "");
}
// ----------------------------------------------------------------------------
diff --git a/base/containers/intrusive_heap.h b/base/containers/intrusive_heap.h
index acda39b..44087b7 100644
--- a/base/containers/intrusive_heap.h
+++ b/base/containers/intrusive_heap.h
@@ -539,7 +539,7 @@
private:
// Templated version of ToIndex that lets insert/erase/Replace work with all
// integral types.
- template <typename I, typename = std::enable_if_t<std::is_integral<I>::value>>
+ template <typename I, typename = std::enable_if_t<std::is_integral_v<I>>>
size_type ToIndex(I pos) {
return static_cast<size_type>(pos);
}
diff --git a/base/containers/intrusive_heap_unittest.cc b/base/containers/intrusive_heap_unittest.cc
index f6d83ec..7a54e83 100644
--- a/base/containers/intrusive_heap_unittest.cc
+++ b/base/containers/intrusive_heap_unittest.cc
@@ -143,8 +143,8 @@
// Used to determine whether or not the "take" operations can be used.
template <typename T>
struct NotMovable {
- static constexpr bool value = !std::is_nothrow_move_constructible<T>::value &&
- std::is_copy_constructible<T>::value;
+ static constexpr bool value = !std::is_nothrow_move_constructible_v<T> &&
+ std::is_copy_constructible_v<T>;
};
// Invokes "take" if the type is movable, otherwise invokes erase.
@@ -516,11 +516,11 @@
// default-constructors, move-operations and copy-operations.
template <typename ValueType, bool D, bool M, bool C>
void ValidateValueType() {
- static_assert(std::is_default_constructible<ValueType>::value == D, "oops");
- static_assert(std::is_move_constructible<ValueType>::value == M, "oops");
- static_assert(std::is_move_assignable<ValueType>::value == M, "oops");
- static_assert(std::is_copy_constructible<ValueType>::value == C, "oops");
- static_assert(std::is_copy_assignable<ValueType>::value == C, "oops");
+ static_assert(std::is_default_constructible_v<ValueType> == D, "oops");
+ static_assert(std::is_move_constructible_v<ValueType> == M, "oops");
+ static_assert(std::is_move_assignable_v<ValueType> == M, "oops");
+ static_assert(std::is_copy_constructible_v<ValueType> == C, "oops");
+ static_assert(std::is_copy_assignable_v<ValueType> == C, "oops");
}
// A small test element that provides its own HeapHandle storage and implements
diff --git a/base/containers/span.h b/base/containers/span.h
index f8fed5e..735f79d 100644
--- a/base/containers/span.h
+++ b/base/containers/span.h
@@ -19,8 +19,8 @@
#include "base/containers/checked_iterators.h"
#include "base/containers/contiguous_iterator.h"
#include "base/cxx20_to_address.h"
-#include "base/memory/raw_ptr_exclusion.h"
-#include "base/numerics/safe_math.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/template_util.h"
namespace base {
@@ -285,11 +285,10 @@
CHECK(Extent == dynamic_extent || Extent == count);
}
- template <
- typename It,
- typename End,
- typename = internal::EnableIfCompatibleContiguousIterator<It, T>,
- typename = std::enable_if_t<!std::is_convertible<End, size_t>::value>>
+ template <typename It,
+ typename End,
+ typename = internal::EnableIfCompatibleContiguousIterator<It, T>,
+ typename = std::enable_if_t<!std::is_convertible_v<End, size_t>>>
constexpr span(It begin, End end) noexcept
// Subtracting two iterators gives a ptrdiff_t, but the result should be
// non-negative: see CHECK below.
@@ -496,7 +495,7 @@
template <typename T,
size_t X,
- typename = std::enable_if_t<!std::is_const<T>::value>>
+ typename = std::enable_if_t<!std::is_const_v<T>>>
span<uint8_t, (X == dynamic_extent ? dynamic_extent : sizeof(T) * X)>
as_writable_bytes(span<T, X> s) noexcept {
return {reinterpret_cast<uint8_t*>(s.data()), s.size_bytes()};
diff --git a/base/containers/span_unittest.cc b/base/containers/span_unittest.cc
index 469530b..3801e1c 100644
--- a/base/containers/span_unittest.cc
+++ b/base/containers/span_unittest.cc
@@ -242,19 +242,19 @@
// In particular we are checking whether From is implicitly convertible to To,
// which also implies that To is explicitly constructible from From.
static_assert(
- std::is_convertible<std::array<int, 3>&, base::span<int>>::value,
+ std::is_convertible_v<std::array<int, 3>&, base::span<int>>,
"Error: l-value reference to std::array<int> should be convertible to "
"base::span<int> with dynamic extent.");
static_assert(
- std::is_convertible<std::array<int, 3>&, base::span<int, 3>>::value,
+ std::is_convertible_v<std::array<int, 3>&, base::span<int, 3>>,
"Error: l-value reference to std::array<int> should be convertible to "
"base::span<int> with the same static extent.");
static_assert(
- std::is_convertible<std::array<int, 3>&, base::span<const int>>::value,
+ std::is_convertible_v<std::array<int, 3>&, base::span<const int>>,
"Error: l-value reference to std::array<int> should be convertible to "
"base::span<const int> with dynamic extent.");
static_assert(
- std::is_convertible<std::array<int, 3>&, base::span<const int, 3>>::value,
+ std::is_convertible_v<std::array<int, 3>&, base::span<const int, 3>>,
"Error: l-value reference to std::array<int> should be convertible to "
"base::span<const int> with the same static extent.");
static_assert(std::is_convertible<const std::array<int, 3>&,
@@ -297,12 +297,12 @@
// Args, which also implies that T is not implicitly constructible from Args
// as well.
static_assert(
- !std::is_constructible<base::span<int>, const std::array<int, 3>&>::value,
+ !std::is_constructible_v<base::span<int>, const std::array<int, 3>&>,
"Error: base::span<int> with dynamic extent should not be constructible "
"from const l-value reference to std::array<int>");
static_assert(
- !std::is_constructible<base::span<int>, std::array<const int, 3>&>::value,
+ !std::is_constructible_v<base::span<int>, std::array<const int, 3>&>,
"Error: base::span<int> with dynamic extent should not be constructible "
"from l-value reference to std::array<const int>");
@@ -313,17 +313,17 @@
"const from l-value reference to std::array<const int>");
static_assert(
- !std::is_constructible<base::span<int, 2>, std::array<int, 3>&>::value,
+ !std::is_constructible_v<base::span<int, 2>, std::array<int, 3>&>,
"Error: base::span<int> with static extent should not be constructible "
"from l-value reference to std::array<int> with different extent");
static_assert(
- !std::is_constructible<base::span<int, 4>, std::array<int, 3>&>::value,
+ !std::is_constructible_v<base::span<int, 4>, std::array<int, 3>&>,
"Error: base::span<int> with dynamic extent should not be constructible "
"from l-value reference to std::array<int> with different extent");
static_assert(
- !std::is_constructible<base::span<int>, std::array<bool, 3>&>::value,
+ !std::is_constructible_v<base::span<int>, std::array<bool, 3>&>,
"Error: base::span<int> with dynamic extent should not be constructible "
"from l-value reference to std::array<bool>");
}
@@ -1251,9 +1251,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == dynamic_extent, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeSpanFromPointerPair) {
@@ -1268,9 +1267,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == dynamic_extent, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeSpanFromConstexprArray) {
@@ -1280,9 +1278,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == 5, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeSpanFromStdArray) {
@@ -1292,9 +1289,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == 5, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeSpanFromConstContainer) {
@@ -1304,9 +1300,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == dynamic_extent, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeStaticSpanFromConstContainer) {
@@ -1316,9 +1311,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == 5, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeSpanFromContainer) {
@@ -1328,9 +1322,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == dynamic_extent, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeStaticSpanFromContainer) {
@@ -1340,9 +1333,8 @@
EXPECT_EQ(expected_span.data(), make_span<5>(vector).data());
EXPECT_EQ(expected_span.size(), make_span<5>(vector).size());
static_assert(decltype(make_span<5>(vector))::extent == 5, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeStaticSpanFromConstexprContainer) {
@@ -1368,9 +1360,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == dynamic_extent, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeStaticSpanFromRValueContainer) {
@@ -1384,9 +1375,8 @@
EXPECT_EQ(expected_span.data(), made_span.data());
EXPECT_EQ(expected_span.size(), made_span.size());
static_assert(decltype(made_span)::extent == 5, "");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeSpanFromDynamicSpan) {
@@ -1406,9 +1396,8 @@
static_assert(decltype(made_span)::extent == decltype(expected_span)::extent,
"make_span(span) should have the same extent as span");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, MakeSpanFromStaticSpan) {
@@ -1428,9 +1417,8 @@
static_assert(decltype(made_span)::extent == decltype(expected_span)::extent,
"make_span(span) should have the same extent as span");
- static_assert(
- std::is_same<decltype(expected_span), decltype(made_span)>::value,
- "the type of made_span differs from expected_span!");
+ static_assert(std::is_same_v<decltype(expected_span), decltype(made_span)>,
+ "the type of made_span differs from expected_span!");
}
TEST(SpanTest, EnsureConstexprGoodness) {
@@ -1544,20 +1532,20 @@
// Statically checks that various conversions between spans of dynamic and
// static extent are possible or not.
static_assert(
- !std::is_constructible<span<int, 0>, span<int>>::value,
+ !std::is_constructible_v<span<int, 0>, span<int>>,
"Error: static span should not be constructible from dynamic span");
- static_assert(!std::is_constructible<span<int, 2>, span<int, 1>>::value,
+ static_assert(!std::is_constructible_v<span<int, 2>, span<int, 1>>,
"Error: static span should not be constructible from static "
"span with different extent");
- static_assert(std::is_convertible<span<int, 0>, span<int>>::value,
+ static_assert(std::is_convertible_v<span<int, 0>, span<int>>,
"Error: static span should be convertible to dynamic span");
- static_assert(std::is_convertible<span<int>, span<int>>::value,
+ static_assert(std::is_convertible_v<span<int>, span<int>>,
"Error: dynamic span should be convertible to dynamic span");
- static_assert(std::is_convertible<span<int, 2>, span<int, 2>>::value,
+ static_assert(std::is_convertible_v<span<int, 2>, span<int, 2>>,
"Error: static span should be convertible to static span");
}
diff --git a/base/containers/vector_buffer.h b/base/containers/vector_buffer.h
index 3c8d102..e2f856b 100644
--- a/base/containers/vector_buffer.h
+++ b/base/containers/vector_buffer.h
@@ -97,14 +97,14 @@
// Trivially destructible objects need not have their destructors called.
template <typename T2 = T,
- typename std::enable_if<std::is_trivially_destructible<T2>::value,
+ typename std::enable_if<std::is_trivially_destructible_v<T2>,
int>::type = 0>
void DestructRange(T* begin, T* end) {}
// Non-trivially destructible objects must have their destructors called
// individually.
template <typename T2 = T,
- typename std::enable_if<!std::is_trivially_destructible<T2>::value,
+ typename std::enable_if<!std::is_trivially_destructible_v<T2>,
int>::type = 0>
void DestructRange(T* begin, T* end) {
CHECK_LE(begin, end);
@@ -148,7 +148,7 @@
// destruct the original.
template <
typename T2 = T,
- typename std::enable_if<std::is_move_constructible<T2>::value &&
+ typename std::enable_if<std::is_move_constructible_v<T2> &&
!is_trivially_copyable_or_relocatable<T2>,
int>::type = 0>
static void MoveRange(T* from_begin, T* from_end, T* to) {
@@ -165,7 +165,7 @@
// destruct the original.
template <
typename T2 = T,
- typename std::enable_if<!std::is_move_constructible<T2>::value &&
+ typename std::enable_if<!std::is_move_constructible_v<T2> &&
!is_trivially_copyable_or_relocatable<T2>,
int>::type = 0>
static void MoveRange(T* from_begin, T* from_end, T* to) {
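
As a side note on the dispatch pattern used above: a minimal standalone sketch (plain C++, not libchrome code) showing how std::enable_if_t plus the _v trait variables selects between a no-op and an explicit-destructor overload, mirroring the DestructRange split.

#include <new>
#include <string>
#include <type_traits>

// Trivially destructible types: destruction is a no-op.
template <typename T,
          std::enable_if_t<std::is_trivially_destructible_v<T>, int> = 0>
void DestroyRange(T*, T*) {}

// Non-trivially destructible types: run each destructor explicitly.
template <typename T,
          std::enable_if_t<!std::is_trivially_destructible_v<T>, int> = 0>
void DestroyRange(T* begin, T* end) {
  for (; begin != end; ++begin)
    begin->~T();
}

int main() {
  int ints[2] = {1, 2};
  DestroyRange(ints, ints + 2);  // Selects the no-op overload.

  alignas(std::string) unsigned char storage[sizeof(std::string)];
  std::string* s = new (storage) std::string("hello");
  DestroyRange(s, s + 1);  // Selects the overload that calls ~std::string().
  return 0;
}
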
diff --git a/base/debug/crash_logging.h b/base/debug/crash_logging.h
index c492b27..5069651 100644
--- a/base/debug/crash_logging.h
+++ b/base/debug/crash_logging.h
@@ -172,7 +172,7 @@
::base::debug::CrashKeySize::Size1024)
#define SCOPED_CRASH_KEY_BOOL(category, name, data) \
- static_assert(std::is_same<std::decay_t<decltype(data)>, bool>::value, \
+ static_assert(std::is_same_v<std::decay_t<decltype(data)>, bool>, \
"SCOPED_CRASH_KEY_BOOL must be passed a boolean value."); \
SCOPED_CRASH_KEY_STRING32(category, name, (data) ? "true" : "false")
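
A hedged usage sketch for the macro above (the category/name strings and the function are hypothetical); the tightened static_assert now rejects non-bool arguments at compile time.

#include "base/debug/crash_logging.h"

void RecordShutdownState(bool fast_path) {
  // Annotates crash reports with "true"/"false" for the lifetime of this
  // scope. Passing an int here would trip the std::is_same_v static_assert.
  SCOPED_CRASH_KEY_BOOL("shutdown", "fast_path", fast_path);
}
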
diff --git a/base/debug/crash_logging_unittest.nc b/base/debug/crash_logging_unittest.nc
index 97ad6e1..669b183 100644
--- a/base/debug/crash_logging_unittest.nc
+++ b/base/debug/crash_logging_unittest.nc
@@ -7,7 +7,7 @@
#include "base/debug/crash_logging.h"
-#if defined(NCTEST_SCOPED_CRASH_KEY_BOOL_ON_NON_BOOL_ARG) // [r"static assertion failed due to requirement 'std::is_same<int, bool>::value': SCOPED_CRASH_KEY_BOOL must be passed a boolean value\."]
+#if defined(NCTEST_SCOPED_CRASH_KEY_BOOL_ON_NON_BOOL_ARG) // [r"static assertion failed due to requirement 'std::is_same_v<int, bool>': SCOPED_CRASH_KEY_BOOL must be passed a boolean value\."]
void WontCompile() {
SCOPED_CRASH_KEY_BOOL(category, name, 1);
diff --git a/base/debug/stack_trace.cc b/base/debug/stack_trace.cc
index 3debc8b..551c0d2 100644
--- a/base/debug/stack_trace.cc
+++ b/base/debug/stack_trace.cc
@@ -261,13 +261,6 @@
#endif
}
-const void *const *StackTrace::Addresses(size_t* count) const {
- *count = count_;
- if (count_)
- return trace_;
- return nullptr;
-}
-
void StackTrace::Print() const {
PrintWithPrefix(nullptr);
}
diff --git a/base/debug/stack_trace.h b/base/debug/stack_trace.h
index a16c959..9f5d7c0 100644
--- a/base/debug/stack_trace.h
+++ b/base/debug/stack_trace.h
@@ -11,6 +11,7 @@
#include <string>
#include "base/base_export.h"
+#include "base/containers/span.h"
#include "base/debug/debugging_buildflags.h"
#include "base/memory/raw_ptr.h"
#include "build/build_config.h"
@@ -104,7 +105,9 @@
// number of elements in the returned array. Addresses()[0] will contain an
// address from the leaf function, and Addresses()[count-1] will contain an
// address from the root function (i.e.; the thread's entry point).
- const void* const* Addresses(size_t* count) const;
+ span<const void* const> addresses() const {
+ return make_span(trace_, count_);
+ }
// Prints the stack trace to stderr.
void Print() const;
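
A minimal usage sketch of the new span-returning accessor that replaces the removed Addresses(size_t*) out-parameter form (function name is illustrative):

#include "base/containers/span.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"

void LogLeafFrame() {
  base::debug::StackTrace trace;
  base::span<const void* const> frames = trace.addresses();
  // An empty span replaces the old nullptr/zero-count signal.
  if (!frames.empty()) {
    LOG(INFO) << "leaf frame: " << frames[0] << ", depth: " << frames.size();
  }
}
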
diff --git a/base/debug/stack_trace_perftest.cc b/base/debug/stack_trace_perftest.cc
index 4f03e50..1fbdeba 100644
--- a/base/debug/stack_trace_perftest.cc
+++ b/base/debug/stack_trace_perftest.cc
@@ -4,6 +4,7 @@
#include <vector>
+#include "base/containers/span.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
@@ -32,19 +33,19 @@
class StackTracer {
public:
- StackTracer(size_t trace_count) : trace_count(trace_count) {}
+ StackTracer(size_t trace_count) : trace_count_(trace_count) {}
void Trace() {
- size_t tmp;
- base::debug::StackTrace st(trace_count);
- const void* addresses = st.Addresses(&tmp);
+ StackTrace st(trace_count_);
+ span<const void* const> addresses = st.addresses();
// make sure a valid array of stack frames is returned
- EXPECT_NE(addresses, nullptr);
+ ASSERT_FALSE(addresses.empty());
+ EXPECT_TRUE(addresses[0]);
// make sure the test generates the intended count of stack frames
- EXPECT_EQ(trace_count, tmp);
+ EXPECT_EQ(trace_count_, addresses.size());
}
private:
- const size_t trace_count;
+ const size_t trace_count_;
};
void MultiObjTest(size_t trace_count) {
diff --git a/base/debug/stack_trace_unittest.cc b/base/debug/stack_trace_unittest.cc
index 4759f27..0662aa8 100644
--- a/base/debug/stack_trace_unittest.cc
+++ b/base/debug/stack_trace_unittest.cc
@@ -10,6 +10,7 @@
#include "base/debug/debugging_buildflags.h"
#include "base/debug/stack_trace.h"
+#include "base/immediate_crash.h"
#include "base/logging.h"
#include "base/process/kill.h"
#include "base/process/process_handle.h"
@@ -20,6 +21,12 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
+#include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc.h"
+#if BUILDFLAG(USE_ALLOCATOR_SHIM)
+#include "base/allocator/partition_allocator/src/partition_alloc/shim/allocator_shim.h"
+#endif
+
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_IOS)
#include "base/test/multiprocess_test.h"
#endif
@@ -32,6 +39,7 @@
#else
typedef testing::Test StackTraceTest;
#endif
+typedef testing::Test StackTraceDeathTest;
#if !defined(__UCLIBC__) && !defined(_AIX)
// StackTrace::OutputToStream() is not implemented under uclibc, nor AIX.
@@ -48,8 +56,7 @@
// ToString() should produce the same output.
EXPECT_EQ(backtrace_message, trace.ToString());
- size_t frames_found = 0;
- const void* const* addresses = trace.Addresses(&frames_found);
+ span<const void* const> addresses = trace.addresses();
#if defined(OFFICIAL_BUILD) && \
((BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)) || BUILDFLAG(IS_FUCHSIA))
@@ -61,8 +68,8 @@
// ((BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE)) ||
// BUILDFLAG(IS_FUCHSIA))
- ASSERT_TRUE(addresses);
- ASSERT_GT(frames_found, 5u) << "Too few frames found.";
+ ASSERT_GT(addresses.size(), 5u) << "Too few frames found.";
+ ASSERT_TRUE(addresses[0]);
if (!StackTrace::WillSymbolizeToStreamForTesting())
return;
@@ -98,13 +105,10 @@
TEST_F(StackTraceTest, TruncatedTrace) {
StackTrace trace;
- size_t count = 0;
- trace.Addresses(&count);
- ASSERT_LT(2u, count);
+ ASSERT_LT(2u, trace.addresses().size());
StackTrace truncated(2);
- truncated.Addresses(&count);
- EXPECT_EQ(2u, count);
+ EXPECT_EQ(2u, truncated.addresses().size());
}
#endif // !defined(OFFICIAL_BUILD) && !defined(NO_UNWIND_TABLES)
@@ -157,31 +161,91 @@
#endif // !defined(__UCLIBC__) && !defined(_AIX)
#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID)
-#if !BUILDFLAG(IS_IOS)
-static char* newArray() {
- // Clang warns about the mismatched new[]/delete if they occur in the same
- // function.
- return new char[10];
+// Since Mac's base::debug::StackTrace().Print() is not malloc-free, skip
+// StackDumpSignalHandlerIsMallocFree if BUILDFLAG(IS_MAC).
+#if BUILDFLAG(USE_ALLOCATOR_SHIM) && !BUILDFLAG(IS_MAC)
+
+namespace {
+
+// ImmediateCrash() if a signal handler incorrectly uses malloc(). In an
+// actual implementation this could cause infinite recursion into the signal
+// handler or other problems, because malloc() is not guaranteed to be
+// async-signal-safe.
+void* BadMalloc(const allocator_shim::AllocatorDispatch*, size_t, void*) {
+ base::ImmediateCrash();
}
-MULTIPROCESS_TEST_MAIN(MismatchedMallocChildProcess) {
- char* pointer = newArray();
- delete pointer;
- return 2;
+void* BadCalloc(const allocator_shim::AllocatorDispatch*,
+ size_t,
+ size_t,
+ void* context) {
+ base::ImmediateCrash();
}
-// Regression test for StackDumpingSignalHandler async-signal unsafety.
-// Combined with tcmalloc's debugallocation, that signal handler
-// and e.g. mismatched new[]/delete would cause a hang because
-// of re-entering malloc.
-TEST_F(StackTraceTest, AsyncSignalUnsafeSignalHandlerHang) {
- Process child = SpawnChild("MismatchedMallocChildProcess");
- ASSERT_TRUE(child.IsValid());
- int exit_code;
- ASSERT_TRUE(
- child.WaitForExitWithTimeout(TestTimeouts::action_timeout(), &exit_code));
+void* BadAlignedAlloc(const allocator_shim::AllocatorDispatch*,
+ size_t,
+ size_t,
+ void*) {
+ base::ImmediateCrash();
}
-#endif // !BUILDFLAG(IS_IOS)
+
+void* BadAlignedRealloc(const allocator_shim::AllocatorDispatch*,
+ void*,
+ size_t,
+ size_t,
+ void*) {
+ base::ImmediateCrash();
+}
+
+void* BadRealloc(const allocator_shim::AllocatorDispatch*,
+ void*,
+ size_t,
+ void*) {
+ base::ImmediateCrash();
+}
+
+void BadFree(const allocator_shim::AllocatorDispatch*, void*, void*) {
+ base::ImmediateCrash();
+}
+
+allocator_shim::AllocatorDispatch g_bad_malloc_dispatch = {
+ &BadMalloc, /* alloc_function */
+ &BadMalloc, /* alloc_unchecked_function */
+ &BadCalloc, /* alloc_zero_initialized_function */
+ &BadAlignedAlloc, /* alloc_aligned_function */
+ &BadRealloc, /* realloc_function */
+ &BadFree, /* free_function */
+ nullptr, /* get_size_estimate_function */
+ nullptr, /* claimed_address_function */
+ nullptr, /* batch_malloc_function */
+ nullptr, /* batch_free_function */
+ nullptr, /* free_definite_size_function */
+ nullptr, /* try_free_default_function */
+ &BadAlignedAlloc, /* aligned_malloc_function */
+ &BadAlignedRealloc, /* aligned_realloc_function */
+ &BadFree, /* aligned_free_function */
+ nullptr, /* next */
+};
+
+} // namespace
+
+// Regression test for StackDumpSignalHandler async-signal unsafety.
+// Since malloc() is not guaranteed to be async-signal-safe, it must not be
+// called inside StackDumpSignalHandler().
+TEST_F(StackTraceDeathTest, StackDumpSignalHandlerIsMallocFree) {
+ EXPECT_DEATH_IF_SUPPORTED(
+ [] {
+        // On Android, base::debug::EnableInProcessStackDumping() does not
+        // install StackDumpSignalHandler as the handler for any signal, so
+        // the StackDumpSignalHandlerIsMallocFree test doesn't work on Android.
+ EnableInProcessStackDumping();
+ allocator_shim::InsertAllocatorDispatch(&g_bad_malloc_dispatch);
+ // Raise SIGSEGV to invoke StackDumpSignalHandler().
+ kill(getpid(), SIGSEGV);
+ }(),
+ "\\[end of stack trace\\]\n");
+}
+#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) && !BUILDFLAG(IS_MAC)
namespace {
diff --git a/base/debug/task_trace.cc b/base/debug/task_trace.cc
index 8a9f8a8..14775df 100644
--- a/base/debug/task_trace.cc
+++ b/base/debug/task_trace.cc
@@ -4,22 +4,20 @@
#include "base/debug/task_trace.h"
+#include <algorithm>
+#include <iostream>
+#include <sstream>
+
+#include "base/pending_task.h"
#include "base/ranges/algorithm.h"
+#include "base/task/common/task_annotator.h"
#include "build/build_config.h"
#if BUILDFLAG(IS_ANDROID)
#include <android/log.h>
-#endif // BUILDFLAG(IS_ANDROID)
-#include <iostream>
-#include <sstream>
-
-#if BUILDFLAG(IS_ANDROID)
#include "base/no_destructor.h"
-#endif
-
-#include "base/pending_task.h"
-#include "base/task/common/task_annotator.h"
+#endif // BUILDFLAG(IS_ANDROID)
namespace base {
namespace debug {
@@ -98,11 +96,11 @@
if (empty()) {
return count;
}
- const void* const* current_addresses = stack_trace_->Addresses(&count);
- for (size_t i = 0; i < count && i < addresses.size(); ++i) {
- addresses[i] = current_addresses[i];
- }
- return count;
+ span<const void* const> current_addresses = stack_trace_->addresses();
+ ranges::copy_n(current_addresses.begin(),
+ std::min(current_addresses.size(), addresses.size()),
+ addresses.begin());
+ return current_addresses.size();
}
std::ostream& operator<<(std::ostream& os, const TaskTrace& task_trace) {
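
For reference, a small sketch of the truncating-copy idiom adopted above: copy at most the destination's capacity while still reporting the full source size (the helper name is hypothetical, not libchrome API):

#include <algorithm>

#include "base/containers/span.h"
#include "base/ranges/algorithm.h"

// Copies min(source.size(), out.size()) elements and returns how many were
// available overall, mirroring TaskTrace::GetAddresses() above.
size_t CopyAddressPrefix(base::span<const void* const> source,
                         base::span<const void*> out) {
  base::ranges::copy_n(source.begin(),
                       std::min(source.size(), out.size()), out.begin());
  return source.size();
}
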
diff --git a/base/debug/test_elf_image_builder.cc b/base/debug/test_elf_image_builder.cc
index 6218e4c..e2201b5 100644
--- a/base/debug/test_elf_image_builder.cc
+++ b/base/debug/test_elf_image_builder.cc
@@ -287,8 +287,7 @@
// static
template <typename T>
uint8_t* TestElfImageBuilder::AppendHdr(const T& hdr, uint8_t* loc) {
- static_assert(std::is_trivially_copyable<T>::value,
- "T should be a plain struct");
+ static_assert(std::is_trivially_copyable_v<T>, "T should be a plain struct");
memcpy(loc, &hdr, sizeof(T));
return loc + sizeof(T);
}
diff --git a/base/features.cc b/base/features.cc
index 89bacef..114d512 100644
--- a/base/features.cc
+++ b/base/features.cc
@@ -34,11 +34,11 @@
BASE_FEATURE(kJsonNegativeZero, "JsonNegativeZero", FEATURE_ENABLED_BY_DEFAULT);
-#if BUILDFLAG(IS_ANDROID)
-// Force to enable LowEndDeviceMode partially on Android mid-range devices.
-// Such devices aren't considered low-end, but we'd like experiment with
-// a subset of low-end features to see if we get a good memory vs. performance
-// tradeoff.
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
+// Used to enable LowEndDeviceMode partially on Android and ChromeOS mid-range
+// devices. Such devices aren't considered low-end, but we'd like to
+// experiment with a subset of low-end features to see if we get a good
+// memory vs. performance tradeoff.
//
// TODO(crbug.com/1434873): |#if| out 32-bit before launching or going to
// high Stable %, because we will enable the feature only for <8GB 64-bit
@@ -46,8 +46,15 @@
// population to collect data.
BASE_FEATURE(kPartialLowEndModeOnMidRangeDevices,
"PartialLowEndModeOnMidRangeDevices",
+#if BUILDFLAG(IS_ANDROID)
base::FEATURE_ENABLED_BY_DEFAULT);
+#elif BUILDFLAG(IS_CHROMEOS)
+ base::FEATURE_DISABLED_BY_DEFAULT);
+#endif
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
+
+#if BUILDFLAG(IS_ANDROID)
// Whether to report frame metrics to the Android.FrameTimeline.* histograms.
BASE_FEATURE(kCollectAndroidFrameTimelineMetrics,
"CollectAndroidFrameTimelineMetrics",
diff --git a/base/features.h b/base/features.h
index 7e8af19..3f1fa0b 100644
--- a/base/features.h
+++ b/base/features.h
@@ -25,11 +25,13 @@
BASE_EXPORT BASE_DECLARE_FEATURE(kJsonNegativeZero);
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
BASE_EXPORT BASE_DECLARE_FEATURE(kPartialLowEndModeOnMidRangeDevices);
extern const BASE_EXPORT FeatureParam<bool>
kPartialLowEndModeExcludeLowEndBackgroundCleanup;
+#endif
+#if BUILDFLAG(IS_ANDROID)
BASE_EXPORT BASE_DECLARE_FEATURE(kCollectAndroidFrameTimelineMetrics);
#endif
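
A minimal sketch of how the feature declared above is consulted at runtime; the per-platform default only changes the fallback when no Finch or command-line override is present (the wrapper function is illustrative):

#include "base/feature_list.h"
#include "base/features.h"

bool ShouldUsePartialLowEndMode() {
  // Enabled by default on Android, disabled by default on ChromeOS; field
  // trials or command-line flags can override either default.
  return base::FeatureList::IsEnabled(
      base::features::kPartialLowEndModeOnMidRangeDevices);
}
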
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 032899a..663a769 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -17,6 +17,7 @@
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/sys_string_conversions.h"
+#include "base/strings/utf_ostream_operators.h"
#include "base/strings/utf_string_conversions.h"
#include "base/trace_event/base_tracing.h"
diff --git a/base/files/file_path_unittest.cc b/base/files/file_path_unittest.cc
index 9673a48..6555218 100644
--- a/base/files/file_path_unittest.cc
+++ b/base/files/file_path_unittest.cc
@@ -9,6 +9,7 @@
#include <sstream>
#include "base/files/safe_base_name.h"
+#include "base/strings/utf_ostream_operators.h"
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
#include "build/buildflag.h"
diff --git a/base/functional/bind_unittest.cc b/base/functional/bind_unittest.cc
index ea46db8..451b3a1 100644
--- a/base/functional/bind_unittest.cc
+++ b/base/functional/bind_unittest.cc
@@ -1598,56 +1598,51 @@
// Check if Callback variants have declarations of conversions as expected.
// Copy constructor and assignment of RepeatingCallback.
static_assert(
- std::is_constructible<RepeatingClosure, const RepeatingClosure&>::value,
+ std::is_constructible_v<RepeatingClosure, const RepeatingClosure&>,
"RepeatingClosure should be copyable.");
- static_assert(
- std::is_assignable<RepeatingClosure, const RepeatingClosure&>::value,
- "RepeatingClosure should be copy-assignable.");
+ static_assert(std::is_assignable_v<RepeatingClosure, const RepeatingClosure&>,
+ "RepeatingClosure should be copy-assignable.");
// Move constructor and assignment of RepeatingCallback.
- static_assert(
- std::is_constructible<RepeatingClosure, RepeatingClosure&&>::value,
- "RepeatingClosure should be movable.");
- static_assert(std::is_assignable<RepeatingClosure, RepeatingClosure&&>::value,
+ static_assert(std::is_constructible_v<RepeatingClosure, RepeatingClosure&&>,
+ "RepeatingClosure should be movable.");
+ static_assert(std::is_assignable_v<RepeatingClosure, RepeatingClosure&&>,
"RepeatingClosure should be move-assignable");
// Conversions from OnceCallback to RepeatingCallback.
- static_assert(
- !std::is_constructible<RepeatingClosure, const OnceClosure&>::value,
- "OnceClosure should not be convertible to RepeatingClosure.");
- static_assert(
- !std::is_assignable<RepeatingClosure, const OnceClosure&>::value,
- "OnceClosure should not be convertible to RepeatingClosure.");
+ static_assert(!std::is_constructible_v<RepeatingClosure, const OnceClosure&>,
+ "OnceClosure should not be convertible to RepeatingClosure.");
+ static_assert(!std::is_assignable_v<RepeatingClosure, const OnceClosure&>,
+ "OnceClosure should not be convertible to RepeatingClosure.");
// Destructive conversions from OnceCallback to RepeatingCallback.
- static_assert(!std::is_constructible<RepeatingClosure, OnceClosure&&>::value,
+ static_assert(!std::is_constructible_v<RepeatingClosure, OnceClosure&&>,
"OnceClosure should not be convertible to RepeatingClosure.");
- static_assert(!std::is_assignable<RepeatingClosure, OnceClosure&&>::value,
+ static_assert(!std::is_assignable_v<RepeatingClosure, OnceClosure&&>,
"OnceClosure should not be convertible to RepeatingClosure.");
// Copy constructor and assignment of OnceCallback.
- static_assert(!std::is_constructible<OnceClosure, const OnceClosure&>::value,
+ static_assert(!std::is_constructible_v<OnceClosure, const OnceClosure&>,
"OnceClosure should not be copyable.");
- static_assert(!std::is_assignable<OnceClosure, const OnceClosure&>::value,
+ static_assert(!std::is_assignable_v<OnceClosure, const OnceClosure&>,
"OnceClosure should not be copy-assignable");
// Move constructor and assignment of OnceCallback.
- static_assert(std::is_constructible<OnceClosure, OnceClosure&&>::value,
+ static_assert(std::is_constructible_v<OnceClosure, OnceClosure&&>,
"OnceClosure should be movable.");
- static_assert(std::is_assignable<OnceClosure, OnceClosure&&>::value,
+ static_assert(std::is_assignable_v<OnceClosure, OnceClosure&&>,
"OnceClosure should be move-assignable.");
// Conversions from RepeatingCallback to OnceCallback.
- static_assert(
- std::is_constructible<OnceClosure, const RepeatingClosure&>::value,
- "RepeatingClosure should be convertible to OnceClosure.");
- static_assert(std::is_assignable<OnceClosure, const RepeatingClosure&>::value,
+ static_assert(std::is_constructible_v<OnceClosure, const RepeatingClosure&>,
+ "RepeatingClosure should be convertible to OnceClosure.");
+ static_assert(std::is_assignable_v<OnceClosure, const RepeatingClosure&>,
"RepeatingClosure should be convertible to OnceClosure.");
// Destructive conversions from RepeatingCallback to OnceCallback.
- static_assert(std::is_constructible<OnceClosure, RepeatingClosure&&>::value,
+ static_assert(std::is_constructible_v<OnceClosure, RepeatingClosure&&>,
"RepeatingClosure should be convertible to OnceClosure.");
- static_assert(std::is_assignable<OnceClosure, RepeatingClosure&&>::value,
+ static_assert(std::is_assignable_v<OnceClosure, RepeatingClosure&&>,
"RepeatingClosure should be covretible to OnceClosure.");
OnceClosure cb = BindOnce(&VoidPolymorphic<>::Run);
diff --git a/base/functional/callback_unittest.cc b/base/functional/callback_unittest.cc
index 1d9043d..f56b2aa 100644
--- a/base/functional/callback_unittest.cc
+++ b/base/functional/callback_unittest.cc
@@ -63,21 +63,20 @@
};
TEST_F(CallbackTest, Types) {
- static_assert(std::is_same<void, OnceClosure::ResultType>::value, "");
- static_assert(std::is_same<void(), OnceClosure::RunType>::value, "");
+ static_assert(std::is_same_v<void, OnceClosure::ResultType>, "");
+ static_assert(std::is_same_v<void(), OnceClosure::RunType>, "");
using OnceCallbackT = OnceCallback<double(int, char)>;
- static_assert(std::is_same<double, OnceCallbackT::ResultType>::value, "");
- static_assert(std::is_same<double(int, char), OnceCallbackT::RunType>::value,
- "");
+ static_assert(std::is_same_v<double, OnceCallbackT::ResultType>, "");
+ static_assert(std::is_same_v<double(int, char), OnceCallbackT::RunType>, "");
- static_assert(std::is_same<void, RepeatingClosure::ResultType>::value, "");
- static_assert(std::is_same<void(), RepeatingClosure::RunType>::value, "");
+ static_assert(std::is_same_v<void, RepeatingClosure::ResultType>, "");
+ static_assert(std::is_same_v<void(), RepeatingClosure::RunType>, "");
using RepeatingCallbackT = RepeatingCallback<bool(float, short)>;
- static_assert(std::is_same<bool, RepeatingCallbackT::ResultType>::value, "");
- static_assert(
- std::is_same<bool(float, short), RepeatingCallbackT::RunType>::value, "");
+ static_assert(std::is_same_v<bool, RepeatingCallbackT::ResultType>, "");
+ static_assert(std::is_same_v<bool(float, short), RepeatingCallbackT::RunType>,
+ "");
}
// Ensure we can create unbound callbacks. We need this to be able to store
@@ -324,8 +323,7 @@
return BindRepeating(function, std::forward<FArgs>(args)...);
}
- template <typename R2 = R,
- std::enable_if_t<!std::is_void<R2>::value, int> = 0>
+ template <typename R2 = R, std::enable_if_t<!std::is_void_v<R2>, int> = 0>
static int Outer(std::string* s,
std::unique_ptr<int> a,
std::unique_ptr<int> b) {
@@ -333,34 +331,32 @@
*s += base::NumberToString(*a) + base::NumberToString(*b);
return *a + *b;
}
- template <typename R2 = R,
- std::enable_if_t<!std::is_void<R2>::value, int> = 0>
+ template <typename R2 = R, std::enable_if_t<!std::is_void_v<R2>, int> = 0>
static int Outer(std::string* s, int a, int b) {
*s += "Outer";
*s += base::NumberToString(a) + base::NumberToString(b);
return a + b;
}
- template <typename R2 = R,
- std::enable_if_t<!std::is_void<R2>::value, int> = 0>
+ template <typename R2 = R, std::enable_if_t<!std::is_void_v<R2>, int> = 0>
static int Outer(std::string* s) {
*s += "Outer";
*s += "None";
return 99;
}
- template <typename R2 = R, std::enable_if_t<std::is_void<R2>::value, int> = 0>
+ template <typename R2 = R, std::enable_if_t<std::is_void_v<R2>, int> = 0>
static void Outer(std::string* s,
std::unique_ptr<int> a,
std::unique_ptr<int> b) {
*s += "Outer";
*s += base::NumberToString(*a) + base::NumberToString(*b);
}
- template <typename R2 = R, std::enable_if_t<std::is_void<R2>::value, int> = 0>
+ template <typename R2 = R, std::enable_if_t<std::is_void_v<R2>, int> = 0>
static void Outer(std::string* s, int a, int b) {
*s += "Outer";
*s += base::NumberToString(a) + base::NumberToString(b);
}
- template <typename R2 = R, std::enable_if_t<std::is_void<R2>::value, int> = 0>
+ template <typename R2 = R, std::enable_if_t<std::is_void_v<R2>, int> = 0>
static void Outer(std::string* s) {
*s += "Outer";
*s += "None";
@@ -368,20 +364,20 @@
template <typename OuterR,
typename InnerR,
- std::enable_if_t<!std::is_void<OuterR>::value, int> = 0,
- std::enable_if_t<!std::is_void<InnerR>::value, int> = 0>
+ std::enable_if_t<!std::is_void_v<OuterR>, int> = 0,
+ std::enable_if_t<!std::is_void_v<InnerR>, int> = 0>
static int Inner(std::string* s, OuterR a) {
- static_assert(std::is_same<InnerR, int>::value, "Use int return type");
+ static_assert(std::is_same_v<InnerR, int>, "Use int return type");
*s += "Inner";
*s += base::NumberToString(a);
return a;
}
template <typename OuterR,
typename InnerR,
- std::enable_if_t<std::is_void<OuterR>::value, int> = 0,
- std::enable_if_t<!std::is_void<InnerR>::value, int> = 0>
+ std::enable_if_t<std::is_void_v<OuterR>, int> = 0,
+ std::enable_if_t<!std::is_void_v<InnerR>, int> = 0>
static int Inner(std::string* s) {
- static_assert(std::is_same<InnerR, int>::value, "Use int return type");
+ static_assert(std::is_same_v<InnerR, int>, "Use int return type");
*s += "Inner";
*s += "None";
return 99;
@@ -389,16 +385,16 @@
template <typename OuterR,
typename InnerR,
- std::enable_if_t<!std::is_void<OuterR>::value, int> = 0,
- std::enable_if_t<std::is_void<InnerR>::value, int> = 0>
+ std::enable_if_t<!std::is_void_v<OuterR>, int> = 0,
+ std::enable_if_t<std::is_void_v<InnerR>, int> = 0>
static void Inner(std::string* s, OuterR a) {
*s += "Inner";
*s += base::NumberToString(a);
}
template <typename OuterR,
typename InnerR,
- std::enable_if_t<std::is_void<OuterR>::value, int> = 0,
- std::enable_if_t<std::is_void<InnerR>::value, int> = 0>
+ std::enable_if_t<std::is_void_v<OuterR>, int> = 0,
+ std::enable_if_t<std::is_void_v<InnerR>, int> = 0>
static void Inner(std::string* s) {
*s += "Inner";
*s += "None";
diff --git a/base/functional/invoke.h b/base/functional/invoke.h
index 0f47311..aa3f58f 100644
--- a/base/functional/invoke.h
+++ b/base/functional/invoke.h
@@ -34,17 +34,15 @@
// Small helpers used below in internal::invoke to make the SFINAE more concise.
template <typename F>
-const bool& IsMemFunPtr =
- std::is_member_function_pointer<std::decay_t<F>>::value;
+const bool& IsMemFunPtr = std::is_member_function_pointer_v<std::decay_t<F>>;
template <typename F>
-const bool& IsMemObjPtr = std::is_member_object_pointer<std::decay_t<F>>::value;
+const bool& IsMemObjPtr = std::is_member_object_pointer_v<std::decay_t<F>>;
template <typename F,
typename T,
typename MemPtrClass = member_pointer_class_t<std::decay_t<F>>>
-const bool& IsMemPtrToBaseOf =
- std::is_base_of<MemPtrClass, std::decay_t<T>>::value;
+const bool& IsMemPtrToBaseOf = std::is_base_of_v<MemPtrClass, std::decay_t<T>>;
template <typename T>
const bool& IsRefWrapper = is_reference_wrapper<std::decay_t<T>>::value;
diff --git a/base/linux_util.cc b/base/linux_util.cc
index dea7ca4..51fb324 100644
--- a/base/linux_util.cc
+++ b/base/linux_util.cc
@@ -88,6 +88,24 @@
};
#endif // !BUILDFLAG(IS_CHROMEOS_ASH)
+bool GetThreadsFromProcessDir(const char* dir_path, std::vector<pid_t>* tids) {
+ DirReaderPosix dir_reader(dir_path);
+
+ if (!dir_reader.IsValid()) {
+ DLOG(WARNING) << "Cannot open " << dir_path;
+ return false;
+ }
+
+ while (dir_reader.Next()) {
+ pid_t tid;
+ if (StringToInt(dir_reader.name(), &tid)) {
+ tids->push_back(tid);
+ }
+ }
+
+ return true;
+}
+
// Account for the terminating null character.
constexpr int kDistroSize = 128 + 1;
@@ -138,20 +156,11 @@
// 25 > strlen("/proc//task") + strlen(std::to_string(INT_MAX)) + 1 = 22
char buf[25];
strings::SafeSPrintf(buf, "/proc/%d/task", pid);
- DirReaderPosix dir_reader(buf);
+ return GetThreadsFromProcessDir(buf, tids);
+}
- if (!dir_reader.IsValid()) {
- DLOG(WARNING) << "Cannot open " << buf;
- return false;
- }
-
- while (dir_reader.Next()) {
- pid_t tid;
- if (StringToInt(dir_reader.name(), &tid))
- tids->push_back(tid);
- }
-
- return true;
+bool GetThreadsForCurrentProcess(std::vector<pid_t>* tids) {
+ return GetThreadsFromProcessDir("/proc/self/task", tids);
}
pid_t FindThreadIDWithSyscall(pid_t pid, const std::string& expected_data,
diff --git a/base/linux_util.h b/base/linux_util.h
index b377283..9f913e8 100644
--- a/base/linux_util.h
+++ b/base/linux_util.h
@@ -37,6 +37,14 @@
// true and appends the list of threads to |tids|. Otherwise, returns false.
BASE_EXPORT bool GetThreadsForProcess(pid_t pid, std::vector<pid_t>* tids);
+// Get a list of all threads for the current process. On success, returns true
+// and appends the list of threads to |tids|. Otherwise, returns false.
+// Unlike the function above, this function reads /proc/self/task, not
+// /proc/<pid>/task. On Android, the former should always be accessible to
+// GPU and Browser processes, while the latter may or may not be accessible
+// depending on the system and the app configuration.
+BASE_EXPORT bool GetThreadsForCurrentProcess(std::vector<pid_t>* tids);
+
// For a given process |pid|, look through all its threads and find the first
// thread with /proc/[pid]/task/[thread_id]/syscall whose first N bytes matches
// |expected_data|, where N is the length of |expected_data|.
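
A short usage sketch for the new helper (the logging function is illustrative):

#include <sys/types.h>

#include <vector>

#include "base/linux_util.h"
#include "base/logging.h"

void LogOwnThreadCount() {
  std::vector<pid_t> tids;
  // Reads /proc/self/task, which stays readable even when /proc/<pid>/task
  // is restricted by the sandbox or app configuration.
  if (base::GetThreadsForCurrentProcess(&tids)) {
    LOG(INFO) << "thread count: " << tids.size();
  }
}
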
diff --git a/base/memory/raw_scoped_refptr_mismatch_checker.h b/base/memory/raw_scoped_refptr_mismatch_checker.h
index 0e08a84..06df106 100644
--- a/base/memory/raw_scoped_refptr_mismatch_checker.h
+++ b/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -44,7 +44,7 @@
IsRefCountedType<base::RemoveRawRefT<T>>>,
std::conjunction<base::IsPointer<T>,
IsRefCountedType<base::RemovePointerT<T>>>> {
- static_assert(!std::is_reference<T>::value,
+ static_assert(!std::is_reference_v<T>,
"NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
};
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index 6658dba..a7b12e7 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -135,7 +135,7 @@
#endif
mutable uint32_t ref_count_ = 0;
- static_assert(std::is_unsigned<decltype(ref_count_)>::value,
+ static_assert(std::is_unsigned_v<decltype(ref_count_)>,
"ref_count_ must be an unsigned type.");
#if DCHECK_IS_ON()
diff --git a/base/memory/ref_counted_unittest.nc b/base/memory/ref_counted_unittest.nc
index d5aa4fa..1ed0228 100644
--- a/base/memory/ref_counted_unittest.nc
+++ b/base/memory/ref_counted_unittest.nc
@@ -15,7 +15,7 @@
~InitialRefCountIsZero() {}
};
-#if defined(NCTEST_ADOPT_REF_TO_ZERO_START) // [r"fatal error: static assertion failed due to requirement 'std::is_same<base::subtle::StartRefCountFromOneTag, base::subtle::StartRefCountFromZeroTag>::value': Use AdoptRef only if the reference count starts from one\."]
+#if defined(NCTEST_ADOPT_REF_TO_ZERO_START) // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<base::subtle::StartRefCountFromOneTag, base::subtle::StartRefCountFromZeroTag>': Use AdoptRef only if the reference count starts from one\."]
void WontCompile() {
AdoptRef(new InitialRefCountIsZero());
diff --git a/base/memory/scoped_refptr.h b/base/memory/scoped_refptr.h
index f16cac0..f3f25ac 100644
--- a/base/memory/scoped_refptr.h
+++ b/base/memory/scoped_refptr.h
@@ -118,7 +118,7 @@
template <typename T>
scoped_refptr<T> AdoptRef(T* obj) {
using Tag = std::decay_t<decltype(subtle::GetRefCountPreference<T>())>;
- static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+ static_assert(std::is_same_v<subtle::StartRefCountFromOneTag, Tag>,
"Use AdoptRef only if the reference count starts from one.");
DCHECK(obj);
@@ -246,9 +246,9 @@
scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
// Copy conversion constructor.
- template <typename U,
- typename = typename std::enable_if<
- std::is_convertible<U*, T*>::value>::type>
+ template <
+ typename U,
+ typename = typename std::enable_if<std::is_convertible_v<U*, T*>>::type>
scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
// Move constructor. This is required in addition to the move conversion
@@ -256,9 +256,9 @@
scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
// Move conversion constructor.
- template <typename U,
- typename = typename std::enable_if<
- std::is_convertible<U*, T*>::value>::type>
+ template <
+ typename U,
+ typename = typename std::enable_if<std::is_convertible_v<U*, T*>>::type>
scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
r.ptr_ = nullptr;
}
diff --git a/base/memory/shared_memory_mapping.h b/base/memory/shared_memory_mapping.h
index 011e1d8..d546deb 100644
--- a/base/memory/shared_memory_mapping.h
+++ b/base/memory/shared_memory_mapping.h
@@ -114,7 +114,7 @@
// large enough to contain a T, or nullptr otherwise.
template <typename T>
const T* GetMemoryAs() const {
- static_assert(std::is_trivially_copyable<T>::value,
+ static_assert(std::is_trivially_copyable_v<T>,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
@@ -132,7 +132,7 @@
// page-aligned.
template <typename T>
span<const T> GetMemoryAsSpan() const {
- static_assert(std::is_trivially_copyable<T>::value,
+ static_assert(std::is_trivially_copyable_v<T>,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
@@ -146,7 +146,7 @@
// first element, if any, is guaranteed to be page-aligned.
template <typename T>
span<const T> GetMemoryAsSpan(size_t count) const {
- static_assert(std::is_trivially_copyable<T>::value,
+ static_assert(std::is_trivially_copyable_v<T>,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
@@ -189,7 +189,7 @@
// enough to contain a T, or nullptr otherwise.
template <typename T>
T* GetMemoryAs() const {
- static_assert(std::is_trivially_copyable<T>::value,
+ static_assert(std::is_trivially_copyable_v<T>,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
@@ -206,7 +206,7 @@
// The first element, if any, is guaranteed to be page-aligned.
template <typename T>
span<T> GetMemoryAsSpan() const {
- static_assert(std::is_trivially_copyable<T>::value,
+ static_assert(std::is_trivially_copyable_v<T>,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
@@ -220,7 +220,7 @@
// element, if any, is guaranteed to be page-aligned.
template <typename T>
span<T> GetMemoryAsSpan(size_t count) const {
- static_assert(std::is_trivially_copyable<T>::value,
+ static_assert(std::is_trivially_copyable_v<T>,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
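
A hedged usage sketch, assuming a valid base::WritableSharedMemoryMapping obtained elsewhere; the static_asserts above reject non-trivially-copyable element types at compile time.

#include <cstddef>
#include <cstdint>

#include "base/containers/span.h"
#include "base/memory/shared_memory_mapping.h"

base::span<uint32_t> GetCounters(
    const base::WritableSharedMemoryMapping& mapping, size_t count) {
  // Returns an empty span if the mapping is invalid or smaller than
  // count * sizeof(uint32_t); otherwise a page-aligned view of the memory.
  return mapping.GetMemoryAsSpan<uint32_t>(count);
}
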
diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h
index 3db3911..ce81874 100644
--- a/base/memory/weak_ptr.h
+++ b/base/memory/weak_ptr.h
@@ -183,9 +183,8 @@
// Precondition: t != nullptr
template<typename Derived>
static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
- static_assert(
- std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
- "AsWeakPtr argument must inherit from SupportsWeakPtr");
+ static_assert(std::is_base_of_v<internal::SupportsWeakPtrBase, Derived>,
+ "AsWeakPtr argument must inherit from SupportsWeakPtr");
using Base = typename decltype(ExtractSinglyInheritedBase(t))::Base;
// Ensure SupportsWeakPtr<Base>::AsWeakPtr() is called even if the subclass
// hides or overloads it.
diff --git a/base/metrics/field_trial_params.h b/base/metrics/field_trial_params.h
index 6807fc7..1cf2bf4 100644
--- a/base/metrics/field_trial_params.h
+++ b/base/metrics/field_trial_params.h
@@ -133,12 +133,12 @@
//
// Getting a param value from a FeatureParam<T> will have the same semantics as
// GetFieldTrialParamValueByFeature(), see that function's comments for details.
-template <typename T, bool IsEnum = std::is_enum<T>::value>
+template <typename T, bool IsEnum = std::is_enum_v<T>>
struct FeatureParam {
// Prevent use of FeatureParam<> with unsupported types (e.g. void*). Uses T
// in its definition so that evaluation is deferred until the template is
// instantiated.
- static_assert(!std::is_same<T, T>::value, "unsupported FeatureParam<> type");
+ static_assert(!std::is_same_v<T, T>, "unsupported FeatureParam<> type");
};
// Declares a string-valued parameter. Example:
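
The header's own example is cut off at this hunk boundary; as a hedged illustration (the feature and parameter names are hypothetical), a typed parameter is declared and read like this:

#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

BASE_FEATURE(kMyFeature, "MyFeature", base::FEATURE_DISABLED_BY_DEFAULT);

// FeatureParam<int> is one of the supported specializations; unsupported
// types hit the !std::is_same_v<T, T> static_assert above.
constexpr base::FeatureParam<int> kMyTuningValue{&kMyFeature, "tuning_value",
                                                 42};

int GetTuningValue() {
  // Returns 42 unless a field trial or the command line supplies an override.
  return kMyTuningValue.Get();
}
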
diff --git a/base/metrics/histogram_functions.h b/base/metrics/histogram_functions.h
index 2717fbc..e46afc1 100644
--- a/base/metrics/histogram_functions.h
+++ b/base/metrics/histogram_functions.h
@@ -69,7 +69,7 @@
// Keep them synchronized.
template <typename T>
void UmaHistogramEnumeration(const std::string& name, T sample) {
- static_assert(std::is_enum<T>::value, "T is not an enum.");
+ static_assert(std::is_enum_v<T>, "T is not an enum.");
// This also ensures that an enumeration that doesn't define kMaxValue fails
// with a semi-useful error ("no member named 'kMaxValue' in ...").
static_assert(static_cast<uintmax_t>(T::kMaxValue) <=
@@ -83,7 +83,7 @@
template <typename T>
void UmaHistogramEnumeration(const char* name, T sample) {
- static_assert(std::is_enum<T>::value, "T is not an enum.");
+ static_assert(std::is_enum_v<T>, "T is not an enum.");
// This also ensures that an enumeration that doesn't define kMaxValue fails
// with a semi-useful error ("no member named 'kMaxValue' in ...").
static_assert(static_cast<uintmax_t>(T::kMaxValue) <=
@@ -113,7 +113,7 @@
// otherwise functionally equivalent to the above.
template <typename T>
void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
- static_assert(std::is_enum<T>::value, "T is not an enum.");
+ static_assert(std::is_enum_v<T>, "T is not an enum.");
DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
return UmaHistogramExactLinear(name, static_cast<int>(sample),
@@ -122,7 +122,7 @@
template <typename T>
void UmaHistogramEnumeration(const char* name, T sample, T enum_size) {
- static_assert(std::is_enum<T>::value, "T is not an enum.");
+ static_assert(std::is_enum_v<T>, "T is not an enum.");
DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
return UmaHistogramExactLinear(name, static_cast<int>(sample),
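
A minimal sketch of the kMaxValue-based overload above (the enum and histogram names are hypothetical):

#include "base/metrics/histogram_functions.h"

// kMaxValue is required by the static_asserts in UmaHistogramEnumeration().
enum class LoadResult {
  kSuccess = 0,
  kNetworkError = 1,
  kParseError = 2,
  kMaxValue = kParseError,
};

void RecordLoadResult(LoadResult result) {
  // Compile-time errors fire for non-enum samples ("T is not an enum.") or
  // for enums without kMaxValue.
  base::UmaHistogramEnumeration("MyComponent.LoadResult", result);
}
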
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
index 5ec9643..91b0ff9 100644
--- a/base/metrics/histogram_macros_internal.h
+++ b/base/metrics/histogram_macros_internal.h
@@ -40,7 +40,7 @@
template <typename Enum>
struct EnumSizeTraits<
Enum,
- std::enable_if_t<std::is_enum<decltype(Enum::kMaxValue)>::value>> {
+ std::enable_if_t<std::is_enum_v<decltype(Enum::kMaxValue)>>> {
static constexpr Enum Count() {
// If you're getting
// note: integer value X is outside the valid range of values [0, X] for
@@ -141,9 +141,9 @@
#define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary, \
flag) \
do { \
- static_assert(!std::is_enum<std::decay_t<decltype(sample)>>::value, \
+ static_assert(!std::is_enum_v<std::decay_t<decltype(sample)>>, \
"|sample| should not be an enum type!"); \
- static_assert(!std::is_enum<std::decay_t<decltype(boundary)>>::value, \
+ static_assert(!std::is_enum_v<std::decay_t<decltype(boundary)>>, \
"|boundary| should not be an enum type!"); \
STATIC_HISTOGRAM_POINTER_BLOCK( \
name, Add(sample), \
@@ -157,9 +157,9 @@
#define INTERNAL_HISTOGRAM_SCALED_EXACT_LINEAR_WITH_FLAG( \
name, sample, count, boundary, scale, flag) \
do { \
- static_assert(!std::is_enum<std::decay_t<decltype(sample)>>::value, \
+ static_assert(!std::is_enum_v<std::decay_t<decltype(sample)>>, \
"|sample| should not be an enum type!"); \
- static_assert(!std::is_enum<std::decay_t<decltype(boundary)>>::value, \
+ static_assert(!std::is_enum_v<std::decay_t<decltype(boundary)>>, \
"|boundary| should not be an enum type!"); \
class ScaledLinearHistogramInstance : public base::ScaledLinearHistogram { \
public: \
@@ -206,12 +206,12 @@
do { \
using decayed_sample = std::decay<decltype(sample)>::type; \
using decayed_boundary = std::decay<decltype(boundary)>::type; \
- static_assert(!std::is_enum<decayed_boundary>::value || \
- std::is_enum<decayed_sample>::value, \
- "Unexpected: |boundary| is enum, but |sample| is not."); \
- static_assert(!std::is_enum<decayed_sample>::value || \
- !std::is_enum<decayed_boundary>::value || \
- std::is_same<decayed_sample, decayed_boundary>::value, \
+ static_assert( \
+ !std::is_enum_v<decayed_boundary> || std::is_enum_v<decayed_sample>, \
+ "Unexpected: |boundary| is enum, but |sample| is not."); \
+ static_assert(!std::is_enum_v<decayed_sample> || \
+ !std::is_enum_v<decayed_boundary> || \
+ std::is_same_v<decayed_sample, decayed_boundary>, \
"|sample| and |boundary| shouldn't be of different enums"); \
static_assert( \
static_cast<uintmax_t>(boundary) < \
@@ -227,7 +227,7 @@
scale, flag) \
do { \
using decayed_sample = std::decay<decltype(sample)>::type; \
- static_assert(std::is_enum<decayed_sample>::value, \
+ static_assert(std::is_enum_v<decayed_sample>, \
"Unexpected: |sample| is not at enum."); \
constexpr auto boundary = base::internal::EnumSizeTraits< \
std::decay_t<decltype(sample)>>::Count(); \
diff --git a/base/metrics/histogram_unittest.nc b/base/metrics/histogram_unittest.nc
index c677106..5b5acad 100644
--- a/base/metrics/histogram_unittest.nc
+++ b/base/metrics/histogram_unittest.nc
@@ -67,7 +67,7 @@
UmaHistogramEnumeration("", NoMaxValue::kMoo);
}
-#elif defined(NCTEST_FUNCTION_INT_AS_ENUM) // [r"static assertion failed due to requirement 'std::is_enum<int>::value'"]
+#elif defined(NCTEST_FUNCTION_INT_AS_ENUM) // [r"static assertion failed due to requirement 'std::is_enum_v<int>'"]
void WontCompile() {
UmaHistogramEnumeration("", 1, 2);
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index 7335c00..82a562c 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -425,16 +425,16 @@
// based on knowledge of how the allocator is being used.
template <typename T>
T* GetAsObject(Reference ref) {
- static_assert(std::is_standard_layout<T>::value, "only standard objects");
- static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+ static_assert(std::is_standard_layout_v<T>, "only standard objects");
+ static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
return const_cast<T*>(reinterpret_cast<volatile T*>(
GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
}
template <typename T>
const T* GetAsObject(Reference ref) const {
- static_assert(std::is_standard_layout<T>::value, "only standard objects");
- static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+ static_assert(std::is_standard_layout_v<T>, "only standard objects");
+ static_assert(!std::is_array_v<T>, "use GetAsArray<>()");
static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
return const_cast<const T*>(reinterpret_cast<const volatile T*>(
GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
@@ -453,13 +453,13 @@
// as char, float, double, or (u)intXX_t.
template <typename T>
T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
- static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+ static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
return const_cast<T*>(reinterpret_cast<volatile T*>(
GetBlockData(ref, type_id, count * sizeof(T))));
}
template <typename T>
const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
- static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+ static_assert(std::is_fundamental_v<T>, "use GetAsObject<>()");
return const_cast<const char*>(reinterpret_cast<const volatile T*>(
GetBlockData(ref, type_id, count * sizeof(T))));
}
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index 4389f92..8069a51 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -22,12 +22,26 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
+#include "build/build_config.h"
namespace base {
namespace {
// Whether a 50/50 trial for using a R/W lock should be run.
+// Restrict it to Windows for now as other platforms show poor results.
+#if BUILDFLAG(IS_WIN)
constexpr bool kRunRwLockTrial = true;
+#else
+constexpr bool kRunRwLockTrial = false;
+#endif // BUILDFLAG(IS_WIN)
+
+// Whether the R/W lock should be used when the trial is not active.
+// Only enabled on Windows for now, since other platforms show poor results.
+#if BUILDFLAG(IS_WIN)
+constexpr bool kUseRwLockByDefault = true;
+#else
+constexpr bool kUseRwLockByDefault = false;
+#endif // BUILDFLAG(IS_WIN)
bool EnableBenchmarking() {
// TODO(asvitkine): If this code ends up not being temporary, refactor it to
@@ -344,7 +358,7 @@
if (kRunRwLockTrial && !EnableBenchmarking()) {
return RandInt(0, 1) == 1;
}
- return true;
+ return kUseRwLockByDefault;
}
HistogramBase* StatisticsRecorder::FindHistogramByHashInternal(
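
In effect, the lock choice now reduces to the following sketch (simplified and standalone; the real code also skips the trial when benchmarking is enabled):

#include "base/rand_util.h"
#include "build/build_config.h"

bool ChooseReaderWriterLock() {
#if BUILDFLAG(IS_WIN)
  constexpr bool kRunTrial = true;   // 50/50 experiment on Windows.
  constexpr bool kDefault = true;    // R/W lock by default on Windows.
#else
  constexpr bool kRunTrial = false;  // Other platforms showed poor results.
  constexpr bool kDefault = false;
#endif
  if (kRunTrial) {
    return base::RandInt(0, 1) == 1;
  }
  return kDefault;
}
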
diff --git a/base/numerics/checked_math.h b/base/numerics/checked_math.h
index c84a4f9..c25bef0 100644
--- a/base/numerics/checked_math.h
+++ b/base/numerics/checked_math.h
@@ -17,7 +17,7 @@
template <typename T>
class CheckedNumeric {
- static_assert(std::is_arithmetic<T>::value,
+ static_assert(std::is_arithmetic_v<T>,
"CheckedNumeric<T>: T must be a numeric type.");
public:
@@ -42,7 +42,7 @@
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to CheckedNumerics to make them easier to use.
template <typename Src,
- typename = std::enable_if_t<std::is_arithmetic<Src>::value>>
+ typename = std::enable_if_t<std::is_arithmetic_v<Src>>>
// NOLINTNEXTLINE(google-explicit-constructor)
constexpr CheckedNumeric(Src value) : state_(value) {}
@@ -144,14 +144,14 @@
constexpr CheckedNumeric operator-() const {
// Use an optimized code path for a known run-time variable.
- if (!IsConstantEvaluated() && std::is_signed<T>::value &&
- std::is_floating_point<T>::value) {
+ if (!IsConstantEvaluated() && std::is_signed_v<T> &&
+ std::is_floating_point_v<T>) {
return FastRuntimeNegate();
}
// The negation of two's complement int min is int min.
const bool is_valid =
IsValid() &&
- (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
+ (!std::is_signed_v<T> || std::is_floating_point_v<T> ||
NegateWrapper(state_.value()) != std::numeric_limits<T>::lowest());
return CheckedNumeric<T>(NegateWrapper(state_.value()), is_valid);
}
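
A brief usage sketch for CheckedNumeric, whose type constraints were tightened to the _v trait forms above (the function name is illustrative):

#include <cstdint>

#include "base/numerics/checked_math.h"

// Returns false instead of overflowing: validity is tracked through the
// arithmetic and only committed via AssignIfValid().
bool AccumulateSize(uint32_t a, uint32_t b, uint32_t* out) {
  base::CheckedNumeric<uint32_t> total = a;
  total += b;
  return total.AssignIfValid(out);
}
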
diff --git a/base/numerics/checked_math_impl.h b/base/numerics/checked_math_impl.h
index fa45480..6b463d8 100644
--- a/base/numerics/checked_math_impl.h
+++ b/base/numerics/checked_math_impl.h
@@ -22,7 +22,7 @@
template <typename T>
constexpr bool CheckedAddImpl(T x, T y, T* result) {
- static_assert(std::is_integral<T>::value, "Type must be integral");
+ static_assert(std::is_integral_v<T>, "Type must be integral");
// Since the value of x+y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
using UnsignedDst = typename std::make_unsigned<T>::type;
@@ -32,10 +32,11 @@
const UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
// Addition is valid if the sign of (x + y) is equal to either that of x or
// that of y.
- if (std::is_signed<T>::value
+ if (std::is_signed_v<T>
? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) < 0
- : uresult < uy) // Unsigned is either valid or underflow.
+ : uresult < uy) { // Unsigned is either valid or underflow.
return false;
+ }
*result = static_cast<T>(uresult);
return true;
}
@@ -46,8 +47,8 @@
template <typename T, typename U>
struct CheckedAddOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
@@ -85,7 +86,7 @@
template <typename T>
constexpr bool CheckedSubImpl(T x, T y, T* result) {
- static_assert(std::is_integral<T>::value, "Type must be integral");
+ static_assert(std::is_integral_v<T>, "Type must be integral");
// Since the value of x+y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
using UnsignedDst = typename std::make_unsigned<T>::type;
@@ -95,10 +96,11 @@
const UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
// Subtraction is valid if either x and y have same sign, or (x-y) and x have
// the same sign.
- if (std::is_signed<T>::value
+ if (std::is_signed_v<T>
? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) < 0
- : x < y)
+ : x < y) {
return false;
+ }
*result = static_cast<T>(uresult);
return true;
}
@@ -109,8 +111,8 @@
template <typename T, typename U>
struct CheckedSubOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
@@ -148,7 +150,7 @@
template <typename T>
constexpr bool CheckedMulImpl(T x, T y, T* result) {
- static_assert(std::is_integral<T>::value, "Type must be integral");
+ static_assert(std::is_integral_v<T>, "Type must be integral");
// Since the value of x*y is potentially undefined if we have a signed type,
// we compute it using the unsigned type of the same size.
using UnsignedDst = typename std::make_unsigned<T>::type;
@@ -157,13 +159,14 @@
const UnsignedDst uy = SafeUnsignedAbs(y);
const UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
const bool is_negative =
- std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
+ std::is_signed_v<T> && static_cast<SignedDst>(x ^ y) < 0;
// We have a fast out for unsigned identity or zero on the second operand.
// After that it's an unsigned overflow check on the absolute value, with
// a +1 bound for a negative result.
- if (uy > UnsignedDst(!std::is_signed<T>::value || is_negative) &&
- ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy)
+ if (uy > UnsignedDst(!std::is_signed_v<T> || is_negative) &&
+ ux > (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy) {
return false;
+ }
*result = static_cast<T>(is_negative ? 0 - uresult : uresult);
return true;
}
@@ -174,8 +177,8 @@
template <typename T, typename U>
struct CheckedMulOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
@@ -219,8 +222,8 @@
template <typename T, typename U>
struct CheckedDivOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
@@ -231,7 +234,7 @@
// combination of types needed to trigger this case.
using Promotion = typename BigEnoughPromotion<T, U>::type;
if (BASE_NUMERICS_UNLIKELY(
- (std::is_signed<T>::value && std::is_signed<U>::value &&
+ (std::is_signed_v<T> && std::is_signed_v<U> &&
IsTypeInRangeForNumericType<T, Promotion>::value &&
static_cast<Promotion>(x) ==
std::numeric_limits<Promotion>::lowest() &&
@@ -260,8 +263,8 @@
template <typename T, typename U>
struct CheckedModOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
@@ -270,7 +273,7 @@
using Promotion = typename BigEnoughPromotion<T, U>::type;
if (BASE_NUMERICS_UNLIKELY(
- (std::is_signed<T>::value && std::is_signed<U>::value &&
+ (std::is_signed_v<T> && std::is_signed_v<U> &&
IsTypeInRangeForNumericType<T, Promotion>::value &&
static_cast<Promotion>(x) ==
std::numeric_limits<Promotion>::lowest() &&
@@ -297,8 +300,8 @@
template <typename T, typename U>
struct CheckedLshOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = T;
template <typename V>
static constexpr bool Do(T x, U shift, V* result) {
@@ -313,9 +316,10 @@
}
// Handle the legal corner-case of a full-width signed shift of zero.
- if (!std::is_signed<T>::value || x ||
- as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits))
+ if (!std::is_signed_v<T> || x ||
+ as_unsigned(shift) != as_unsigned(std::numeric_limits<T>::digits)) {
return false;
+ }
*result = 0;
return true;
}
@@ -330,8 +334,8 @@
template <typename T, typename U>
struct CheckedRshOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = T;
template <typename V>
static constexpr bool Do(T x, U shift, V* result) {
@@ -356,8 +360,8 @@
template <typename T, typename U>
struct CheckedAndOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
@@ -378,8 +382,8 @@
template <typename T, typename U>
struct CheckedOrOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
@@ -400,8 +404,8 @@
template <typename T, typename U>
struct CheckedXorOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
@@ -421,11 +425,10 @@
struct CheckedMaxOp {};
template <typename T, typename U>
-struct CheckedMaxOp<
- T,
- U,
- typename std::enable_if<std::is_arithmetic<T>::value &&
- std::is_arithmetic<U>::value>::type> {
+struct CheckedMaxOp<T,
+ U,
+ typename std::enable_if<std::is_arithmetic_v<T> &&
+ std::is_arithmetic_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
@@ -445,11 +448,10 @@
struct CheckedMinOp {};
template <typename T, typename U>
-struct CheckedMinOp<
- T,
- U,
- typename std::enable_if<std::is_arithmetic<T>::value &&
- std::is_arithmetic<U>::value>::type> {
+struct CheckedMinOp<T,
+ U,
+ typename std::enable_if<std::is_arithmetic_v<T> &&
+ std::is_arithmetic_v<U>>::type> {
using result_type = typename LowestValuePromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
@@ -465,22 +467,22 @@
// This is just boilerplate that wraps the standard floating point arithmetic.
// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
-#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
- template <typename T, typename U> \
- struct Checked##NAME##Op< \
- T, U, \
- typename std::enable_if<std::is_floating_point<T>::value || \
- std::is_floating_point<U>::value>::type> { \
- using result_type = typename MaxExponentPromotion<T, U>::type; \
- template <typename V> \
- static constexpr bool Do(T x, U y, V* result) { \
- using Promotion = typename MaxExponentPromotion<T, U>::type; \
- const Promotion presult = x OP y; \
- if (!IsValueInRangeForNumericType<V>(presult)) \
- return false; \
- *result = static_cast<V>(presult); \
- return true; \
- } \
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Checked##NAME##Op< \
+ T, U, \
+ typename std::enable_if<std::is_floating_point_v<T> || \
+ std::is_floating_point_v<U>>::type> { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V> \
+ static constexpr bool Do(T x, U y, V* result) { \
+ using Promotion = typename MaxExponentPromotion<T, U>::type; \
+ const Promotion presult = x OP y; \
+ if (!IsValueInRangeForNumericType<V>(presult)) \
+ return false; \
+ *result = static_cast<V>(presult); \
+ return true; \
+ } \
};
BASE_FLOAT_ARITHMETIC_OPS(Add, +)
@@ -502,10 +504,10 @@
template <typename NumericType>
struct GetNumericRepresentation {
static const NumericRepresentation value =
- std::is_integral<NumericType>::value
+ std::is_integral_v<NumericType>
? NUMERIC_INTEGER
- : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
- : NUMERIC_UNKNOWN);
+ : (std::is_floating_point_v<NumericType> ? NUMERIC_FLOATING
+ : NUMERIC_UNKNOWN);
};
template <typename T,
@@ -520,7 +522,7 @@
constexpr explicit CheckedNumericState(Src value = 0, bool is_valid = true)
: is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
value_(WellDefinedConversionOrZero(value, is_valid_)) {
- static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic_v<Src>, "Argument must be numeric.");
}
template <typename Src>
@@ -536,9 +538,8 @@
template <typename Src>
static constexpr T WellDefinedConversionOrZero(Src value, bool is_valid) {
using SrcType = typename internal::UnderlyingType<Src>::type;
- return (std::is_integral<SrcType>::value || is_valid)
- ? static_cast<T>(value)
- : 0;
+ return (std::is_integral_v<SrcType> || is_valid) ? static_cast<T>(value)
+ : 0;
}
// is_valid_ precedes value_ because member initializers in the constructors
diff --git a/base/numerics/clamped_math.h b/base/numerics/clamped_math.h
index a72e4a7..66112ed 100644
--- a/base/numerics/clamped_math.h
+++ b/base/numerics/clamped_math.h
@@ -17,7 +17,7 @@
template <typename T>
class ClampedNumeric {
- static_assert(std::is_arithmetic<T>::value,
+ static_assert(std::is_arithmetic_v<T>,
"ClampedNumeric<T>: T must be a numeric type.");
public:
diff --git a/base/numerics/clamped_math_impl.h b/base/numerics/clamped_math_impl.h
index 373b1ae..dc02e0c 100644
--- a/base/numerics/clamped_math_impl.h
+++ b/base/numerics/clamped_math_impl.h
@@ -22,8 +22,8 @@
namespace internal {
template <typename T,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_signed<T>::value>::type* = nullptr>
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_signed_v<T>>::type* = nullptr>
constexpr T SaturatedNegWrapper(T value) {
return IsConstantEvaluated() || !ClampedNegFastOp<T>::is_supported
? (NegateWrapper(value) != std::numeric_limits<T>::lowest()
@@ -33,21 +33,20 @@
}
template <typename T,
- typename std::enable_if<std::is_integral<T>::value &&
- !std::is_signed<T>::value>::type* = nullptr>
+ typename std::enable_if<std::is_integral_v<T> &&
+ !std::is_signed_v<T>>::type* = nullptr>
constexpr T SaturatedNegWrapper(T value) {
return T(0);
}
-template <
- typename T,
- typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+template <typename T,
+ typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
constexpr T SaturatedNegWrapper(T value) {
return -value;
}
template <typename T,
- typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+ typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
constexpr T SaturatedAbsWrapper(T value) {
// The calculation below is a static identity for unsigned types, but for
// signed integer types it provides a non-branching, saturated absolute value.
@@ -62,9 +61,8 @@
IsValueNegative<T>(static_cast<T>(SafeUnsignedAbs(value))));
}
-template <
- typename T,
- typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+template <typename T,
+ typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
constexpr T SaturatedAbsWrapper(T value) {
return value < 0 ? -value : value;
}
@@ -75,15 +73,15 @@
template <typename T, typename U>
struct ClampedAddOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
if (!IsConstantEvaluated() && ClampedAddFastOp<T, U>::is_supported)
return ClampedAddFastOp<T, U>::template Do<V>(x, y);
- static_assert(std::is_same<V, result_type>::value ||
+ static_assert(std::is_same_v<V, result_type> ||
IsTypeInRangeForNumericType<U, V>::value,
"The saturation result cannot be determined from the "
"provided types.");
@@ -101,15 +99,15 @@
template <typename T, typename U>
struct ClampedSubOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
if (!IsConstantEvaluated() && ClampedSubFastOp<T, U>::is_supported)
return ClampedSubFastOp<T, U>::template Do<V>(x, y);
- static_assert(std::is_same<V, result_type>::value ||
+ static_assert(std::is_same_v<V, result_type> ||
IsTypeInRangeForNumericType<U, V>::value,
"The saturation result cannot be determined from the "
"provided types.");
@@ -127,8 +125,8 @@
template <typename T, typename U>
struct ClampedMulOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
@@ -150,8 +148,8 @@
template <typename T, typename U>
struct ClampedDivOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
@@ -170,8 +168,8 @@
template <typename T, typename U>
struct ClampedModOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
@@ -190,12 +188,12 @@
template <typename T, typename U>
struct ClampedLshOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = T;
template <typename V = result_type>
static constexpr V Do(T x, U shift) {
- static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+ static_assert(!std::is_signed_v<U>, "Shift value must be unsigned.");
if (BASE_NUMERICS_LIKELY(shift < std::numeric_limits<T>::digits)) {
// Shift as unsigned to avoid undefined behavior.
V result = static_cast<V>(as_unsigned(x) << shift);
@@ -214,12 +212,12 @@
template <typename T, typename U>
struct ClampedRshOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = T;
template <typename V = result_type>
static constexpr V Do(T x, U shift) {
- static_assert(!std::is_signed<U>::value, "Shift value must be unsigned.");
+ static_assert(!std::is_signed_v<U>, "Shift value must be unsigned.");
// Signed right shift is odd, because it saturates to -1 or 0.
const V saturated = as_unsigned(V(0)) - IsValueNegative(x);
return BASE_NUMERICS_LIKELY(shift < IntegerBitsPlusSign<T>::value)
@@ -234,8 +232,8 @@
template <typename T, typename U>
struct ClampedAndOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
@@ -251,8 +249,8 @@
template <typename T, typename U>
struct ClampedOrOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
@@ -268,8 +266,8 @@
template <typename T, typename U>
struct ClampedXorOp<T,
U,
- typename std::enable_if<std::is_integral<T>::value &&
- std::is_integral<U>::value>::type> {
+ typename std::enable_if<std::is_integral_v<T> &&
+ std::is_integral_v<U>>::type> {
using result_type = typename std::make_unsigned<
typename MaxExponentPromotion<T, U>::type>::type;
template <typename V>
@@ -282,11 +280,10 @@
struct ClampedMaxOp {};
template <typename T, typename U>
-struct ClampedMaxOp<
- T,
- U,
- typename std::enable_if<std::is_arithmetic<T>::value &&
- std::is_arithmetic<U>::value>::type> {
+struct ClampedMaxOp<T,
+ U,
+ typename std::enable_if<std::is_arithmetic_v<T> &&
+ std::is_arithmetic_v<U>>::type> {
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
@@ -299,11 +296,10 @@
struct ClampedMinOp {};
template <typename T, typename U>
-struct ClampedMinOp<
- T,
- U,
- typename std::enable_if<std::is_arithmetic<T>::value &&
- std::is_arithmetic<U>::value>::type> {
+struct ClampedMinOp<T,
+ U,
+ typename std::enable_if<std::is_arithmetic_v<T> &&
+ std::is_arithmetic_v<U>>::type> {
using result_type = typename LowestValuePromotion<T, U>::type;
template <typename V = result_type>
static constexpr V Do(T x, U y) {
@@ -314,17 +310,17 @@
// This is just boilerplate that wraps the standard floating point arithmetic.
// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
-#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
- template <typename T, typename U> \
- struct Clamped##NAME##Op< \
- T, U, \
- typename std::enable_if<std::is_floating_point<T>::value || \
- std::is_floating_point<U>::value>::type> { \
- using result_type = typename MaxExponentPromotion<T, U>::type; \
- template <typename V = result_type> \
- static constexpr V Do(T x, U y) { \
- return saturated_cast<V>(x OP y); \
- } \
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Clamped##NAME##Op< \
+ T, U, \
+ typename std::enable_if<std::is_floating_point_v<T> || \
+ std::is_floating_point_v<U>>::type> { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V = result_type> \
+ static constexpr V Do(T x, U y) { \
+ return saturated_cast<V>(x OP y); \
+ } \
};
BASE_FLOAT_ARITHMETIC_OPS(Add, +)
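For context on what these ClampedOp specializations implement: clamped (saturating) arithmetic pins out-of-range results to the nearest representable extreme instead of overflowing. A minimal usage sketch, assuming the ClampAdd/ClampSub helpers exposed by base/numerics/clamped_math.h:

#include <cstdint>
#include <limits>

#include "base/numerics/clamped_math.h"

// Both expressions would overflow with plain integer arithmetic; with the
// clamped ops they saturate at the type's extremes instead.
bool ClampedMathSketch() {
  constexpr int32_t kMax = std::numeric_limits<int32_t>::max();
  constexpr int32_t kMin = std::numeric_limits<int32_t>::min();
  const int32_t sum = base::ClampAdd(kMax, 1);   // pinned at kMax
  const int32_t diff = base::ClampSub(kMin, 1);  // pinned at kMin
  return sum == kMax && diff == kMin;
}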
diff --git a/base/numerics/ranges.h b/base/numerics/ranges.h
index 2d8c8b7..0d18964 100644
--- a/base/numerics/ranges.h
+++ b/base/numerics/ranges.h
@@ -12,7 +12,7 @@
template <typename T>
constexpr bool IsApproximatelyEqual(T lhs, T rhs, T tolerance) {
- static_assert(std::is_arithmetic<T>::value, "Argument must be arithmetic");
+ static_assert(std::is_arithmetic_v<T>, "Argument must be arithmetic");
return std::abs(rhs - lhs) <= tolerance;
}
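A short usage sketch of the IsApproximatelyEqual() helper touched above; it is an absolute-tolerance comparison and all three arguments must share one arithmetic type:

#include "base/numerics/ranges.h"

// True when |measured| is within 0.5 of the 42.0 target.
bool NearTarget(double measured) {
  return base::IsApproximatelyEqual(measured, 42.0, /*tolerance=*/0.5);
}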
diff --git a/base/numerics/safe_conversions.h b/base/numerics/safe_conversions.h
index 7d1e305..953ff59 100644
--- a/base/numerics/safe_conversions.h
+++ b/base/numerics/safe_conversions.h
@@ -52,8 +52,8 @@
Dst,
Src,
typename std::enable_if<
- std::is_integral<Dst>::value && std::is_integral<Src>::value &&
- std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ std::is_integral_v<Dst> && std::is_integral_v<Src> &&
+ std::is_signed_v<Dst> && std::is_signed_v<Src> &&
!IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
static constexpr bool is_supported = true;
@@ -70,8 +70,8 @@
Dst,
Src,
typename std::enable_if<
- std::is_integral<Dst>::value && std::is_integral<Src>::value &&
- !std::is_signed<Dst>::value && std::is_signed<Src>::value &&
+ std::is_integral_v<Dst> && std::is_integral_v<Src> &&
+ !std::is_signed_v<Dst> && std::is_signed_v<Src> &&
!IsTypeInRangeForNumericType<Dst, Src>::value>::type> {
static constexpr bool is_supported = true;
@@ -148,7 +148,7 @@
? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
: S<Dst>::Underflow())
// Skip this check for integral Src, which cannot be NaN.
- : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+ : (std::is_integral_v<Src> || !constraint.IsUnderflowFlagSet()
? S<Dst>::Overflow()
: S<Dst>::NaN());
}
@@ -166,12 +166,11 @@
};
template <typename Dst, typename Src>
-struct SaturateFastOp<
- Dst,
- Src,
- typename std::enable_if<std::is_integral<Src>::value &&
- std::is_integral<Dst>::value &&
- SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+struct SaturateFastOp<Dst,
+ Src,
+ typename std::enable_if<
+ std::is_integral_v<Src> && std::is_integral_v<Dst> &&
+ SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
static constexpr bool is_supported = true;
static constexpr Dst Do(Src value) {
return SaturateFastAsmOp<Dst, Src>::Do(value);
@@ -179,12 +178,11 @@
};
template <typename Dst, typename Src>
-struct SaturateFastOp<
- Dst,
- Src,
- typename std::enable_if<std::is_integral<Src>::value &&
- std::is_integral<Dst>::value &&
- !SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
+struct SaturateFastOp<Dst,
+ Src,
+ typename std::enable_if<
+ std::is_integral_v<Src> && std::is_integral_v<Dst> &&
+ !SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
static constexpr bool is_supported = true;
static constexpr Dst Do(Src value) {
// The exact order of the following is structured to hit the correct
@@ -225,7 +223,7 @@
constexpr Dst strict_cast(Src value) {
using SrcType = typename UnderlyingType<Src>::type;
static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
- static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+ static_assert(std::is_arithmetic_v<Dst>, "Result must be numeric.");
// If you got here from a compiler error, it's because you tried to assign
// from a source type to a destination type that has insufficient range.
@@ -371,8 +369,8 @@
// Rounds towards negative infinity (i.e., down).
template <typename Dst = int,
typename Src,
- typename = std::enable_if_t<std::is_integral<Dst>::value &&
- std::is_floating_point<Src>::value>>
+ typename = std::enable_if_t<std::is_integral_v<Dst> &&
+ std::is_floating_point_v<Src>>>
Dst ClampFloor(Src value) {
return saturated_cast<Dst>(std::floor(value));
}
@@ -380,8 +378,8 @@
// Rounds towards positive infinity (i.e., up).
template <typename Dst = int,
typename Src,
- typename = std::enable_if_t<std::is_integral<Dst>::value &&
- std::is_floating_point<Src>::value>>
+ typename = std::enable_if_t<std::is_integral_v<Dst> &&
+ std::is_floating_point_v<Src>>>
Dst ClampCeil(Src value) {
return saturated_cast<Dst>(std::ceil(value));
}
@@ -397,8 +395,8 @@
// -1.5 to -2.
template <typename Dst = int,
typename Src,
- typename = std::enable_if_t<std::is_integral<Dst>::value &&
- std::is_floating_point<Src>::value>>
+ typename = std::enable_if_t<std::is_integral_v<Dst> &&
+ std::is_floating_point_v<Src>>>
Dst ClampRound(Src value) {
const Src rounded = std::round(value);
return saturated_cast<Dst>(rounded);
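A small sketch of how the conversion helpers above behave, assuming the public saturated_cast / ClampFloor / ClampCeil / ClampRound entry points of safe_conversions.h:

#include <cstdint>

#include "base/numerics/safe_conversions.h"

// saturated_cast<> clamps out-of-range values instead of truncating or
// hitting undefined behavior; the Clamp* helpers add a rounding mode first.
bool SafeConversionSketch() {
  return base::saturated_cast<uint8_t>(1000) == 255 &&  // clamped high
         base::saturated_cast<uint8_t>(-5) == 0 &&      // clamped low
         base::ClampFloor(2.9) == 2 &&                  // floor, then clamp
         base::ClampCeil(2.1) == 3 &&                   // ceil, then clamp
         base::ClampRound<int16_t>(1e9) == 32767;       // rounds, saturates
}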
diff --git a/base/numerics/safe_conversions_arm_impl.h b/base/numerics/safe_conversions_arm_impl.h
index e4b5730..abbf71e 100644
--- a/base/numerics/safe_conversions_arm_impl.h
+++ b/base/numerics/safe_conversions_arm_impl.h
@@ -18,17 +18,17 @@
template <typename Dst, typename Src>
struct SaturateFastAsmOp {
static constexpr bool is_supported =
- kEnableAsmCode && std::is_signed<Src>::value &&
- std::is_integral<Dst>::value && std::is_integral<Src>::value &&
+ kEnableAsmCode && std::is_signed_v<Src> && std::is_integral_v<Dst> &&
+ std::is_integral_v<Src> &&
IntegerBitsPlusSign<Src>::value <= IntegerBitsPlusSign<int32_t>::value &&
IntegerBitsPlusSign<Dst>::value <= IntegerBitsPlusSign<int32_t>::value &&
!IsTypeInRangeForNumericType<Dst, Src>::value;
__attribute__((always_inline)) static Dst Do(Src value) {
int32_t src = value;
- typename std::conditional<std::is_signed<Dst>::value, int32_t,
- uint32_t>::type result;
- if (std::is_signed<Dst>::value) {
+ typename std::conditional<std::is_signed_v<Dst>, int32_t, uint32_t>::type
+ result;
+ if (std::is_signed_v<Dst>) {
asm("ssat %[dst], %[shift], %[src]"
: [dst] "=r"(result)
: [src] "r"(src), [shift] "n"(IntegerBitsPlusSign<Dst>::value <= 32
diff --git a/base/numerics/safe_conversions_impl.h b/base/numerics/safe_conversions_impl.h
index d0a9d1a..0488744 100644
--- a/base/numerics/safe_conversions_impl.h
+++ b/base/numerics/safe_conversions_impl.h
@@ -25,7 +25,7 @@
// we can compute an analog using std::numeric_limits<>::digits.
template <typename NumericType>
struct MaxExponent {
- static const int value = std::is_floating_point<NumericType>::value
+ static const int value = std::is_floating_point_v<NumericType>
? std::numeric_limits<NumericType>::max_exponent
: std::numeric_limits<NumericType>::digits + 1;
};
@@ -34,8 +34,8 @@
// hacks.
template <typename NumericType>
struct IntegerBitsPlusSign {
- static const int value = std::numeric_limits<NumericType>::digits +
- std::is_signed<NumericType>::value;
+ static const int value =
+ std::numeric_limits<NumericType>::digits + std::is_signed_v<NumericType>;
};
// Helper templates for integer manipulations.
@@ -48,16 +48,16 @@
// Determines if a numeric value is negative without throwing compiler
// warnings on: unsigned(value) < 0.
template <typename T,
- typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
+ typename std::enable_if<std::is_signed_v<T>>::type* = nullptr>
constexpr bool IsValueNegative(T value) {
- static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic_v<T>, "Argument must be numeric.");
return value < 0;
}
template <typename T,
- typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
+ typename std::enable_if<!std::is_signed_v<T>>::type* = nullptr>
constexpr bool IsValueNegative(T) {
- static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic_v<T>, "Argument must be numeric.");
return false;
}
@@ -68,7 +68,7 @@
constexpr typename std::make_signed<T>::type ConditionalNegate(
T x,
bool is_negative) {
- static_assert(std::is_integral<T>::value, "Type must be integral");
+ static_assert(std::is_integral_v<T>, "Type must be integral");
using SignedT = typename std::make_signed<T>::type;
using UnsignedT = typename std::make_unsigned<T>::type;
return static_cast<SignedT>((static_cast<UnsignedT>(x) ^
@@ -79,7 +79,7 @@
// This performs a safe, absolute value via unsigned overflow.
template <typename T>
constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
- static_assert(std::is_integral<T>::value, "Type must be integral");
+ static_assert(std::is_integral_v<T>, "Type must be integral");
using UnsignedT = typename std::make_unsigned<T>::type;
return IsValueNegative(value)
? static_cast<UnsignedT>(0u - static_cast<UnsignedT>(value))
@@ -136,10 +136,10 @@
template <typename Dst,
typename Src,
- IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ IntegerRepresentation DstSign = std::is_signed_v<Dst>
? INTEGER_REPRESENTATION_SIGNED
: INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ IntegerRepresentation SrcSign = std::is_signed_v<Src>
? INTEGER_REPRESENTATION_SIGNED
: INTEGER_REPRESENTATION_UNSIGNED>
struct StaticDstRangeRelationToSrcRange;
@@ -236,14 +236,13 @@
SrcLimits::digits < DstLimits::digits)
? (DstLimits::digits - SrcLimits::digits)
: 0;
- template <
- typename T,
- typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+ template <typename T,
+ typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
// Masks out the integer bits that are beyond the precision of the
// intermediate type used for comparison.
static constexpr T Adjust(T value) {
- static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(std::is_same_v<T, Dst>, "");
static_assert(kShift < DstLimits::digits, "");
using UnsignedDst = typename std::make_unsigned_t<T>;
return static_cast<T>(ConditionalNegate(
@@ -251,11 +250,11 @@
IsValueNegative(value)));
}
- template <typename T,
- typename std::enable_if<std::is_floating_point<T>::value>::type* =
- nullptr>
+ template <
+ typename T,
+ typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
static constexpr T Adjust(T value) {
- static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(std::is_same_v<T, Dst>, "");
static_assert(kShift == 0, "");
return value;
}
@@ -268,10 +267,10 @@
typename Src,
template <typename>
class Bounds,
- IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ IntegerRepresentation DstSign = std::is_signed_v<Dst>
? INTEGER_REPRESENTATION_SIGNED
: INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ IntegerRepresentation SrcSign = std::is_signed_v<Src>
? INTEGER_REPRESENTATION_SIGNED
: INTEGER_REPRESENTATION_UNSIGNED,
NumericRangeRepresentation DstRange =
@@ -373,7 +372,7 @@
bool ge_zero = false;
// Converting floating-point to integer will discard fractional part, so
// values in (-1.0, -0.0) will truncate to 0 and fit in Dst.
- if (std::is_floating_point<Src>::value) {
+ if (std::is_floating_point_v<Src>) {
ge_zero = value > Src(-1);
} else {
ge_zero = value >= Src(0);
@@ -399,8 +398,8 @@
template <typename> class Bounds = std::numeric_limits,
typename Src>
constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
- static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
- static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+ static_assert(std::is_arithmetic_v<Src>, "Argument must be numeric.");
+ static_assert(std::is_arithmetic_v<Dst>, "Result must be numeric.");
static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
}
@@ -412,7 +411,7 @@
#define INTEGER_FOR_DIGITS_AND_SIGN(I) \
template <> \
struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
- std::is_signed<I>::value> { \
+ std::is_signed_v<I>> { \
using type = I; \
}
@@ -432,7 +431,7 @@
static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
"Max integer size not supported for this toolchain.");
-template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+template <typename Integer, bool IsSigned = std::is_signed_v<Integer>>
struct TwiceWiderInteger {
using type =
typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
@@ -467,13 +466,13 @@
template <typename Lhs,
typename Rhs,
ArithmeticPromotionCategory Promotion =
- std::is_signed<Lhs>::value
- ? (std::is_signed<Rhs>::value
+ std::is_signed_v<Lhs>
+ ? (std::is_signed_v<Rhs>
? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
? LEFT_PROMOTION
: RIGHT_PROMOTION)
: LEFT_PROMOTION)
- : (std::is_signed<Rhs>::value
+ : (std::is_signed_v<Rhs>
? RIGHT_PROMOTION
: (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
? LEFT_PROMOTION
@@ -495,16 +494,15 @@
typename Lhs,
typename Rhs = Lhs,
bool is_intmax_type =
- std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
- IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+ std::is_integral_v<typename MaxExponentPromotion<Lhs, Rhs>::type> &&
+ IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
value == IntegerBitsPlusSign<intmax_t>::value,
- bool is_max_exponent =
- StaticDstRangeRelationToSrcRange<
- typename MaxExponentPromotion<Lhs, Rhs>::type,
- Lhs>::value ==
- NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
- typename MaxExponentPromotion<Lhs, Rhs>::type,
- Rhs>::value == NUMERIC_RANGE_CONTAINED>
+ bool is_max_exponent = StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Lhs>::value == NUMERIC_RANGE_CONTAINED &&
+ StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Rhs>::value == NUMERIC_RANGE_CONTAINED>
struct BigEnoughPromotion;
// The side with the max exponent is big enough.
@@ -519,8 +517,8 @@
struct BigEnoughPromotion<Lhs, Rhs, false, false> {
using type =
typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
- std::is_signed<Lhs>::value ||
- std::is_signed<Rhs>::value>::type;
+ std::is_signed_v<Lhs> ||
+ std::is_signed_v<Rhs>>::type;
static const bool is_contained = true;
};
@@ -538,12 +536,11 @@
template <typename T, typename Lhs, typename Rhs = Lhs>
struct IsIntegerArithmeticSafe {
static const bool value =
- !std::is_floating_point<T>::value &&
- !std::is_floating_point<Lhs>::value &&
- !std::is_floating_point<Rhs>::value &&
- std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+ !std::is_floating_point_v<T> && !std::is_floating_point_v<Lhs> &&
+ !std::is_floating_point_v<Rhs> &&
+ std::is_signed_v<T> >= std::is_signed_v<Lhs> &&
IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
- std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+ std::is_signed_v<T> >= std::is_signed_v<Rhs> &&
IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
};
@@ -552,8 +549,8 @@
template <typename Lhs,
typename Rhs,
bool is_promotion_possible = IsIntegerArithmeticSafe<
- typename std::conditional<std::is_signed<Lhs>::value ||
- std::is_signed<Rhs>::value,
+ typename std::conditional<std::is_signed_v<Lhs> ||
+ std::is_signed_v<Rhs>,
intmax_t,
uintmax_t>::type,
typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
@@ -563,8 +560,8 @@
struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
using type =
typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
- std::is_signed<Lhs>::value ||
- std::is_signed<Rhs>::value>::type;
+ std::is_signed_v<Lhs> ||
+ std::is_signed_v<Rhs>>::type;
static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
static const bool is_contained = true;
};
@@ -576,19 +573,19 @@
};
// Extracts the underlying type from an enum.
-template <typename T, bool is_enum = std::is_enum<T>::value>
+template <typename T, bool is_enum = std::is_enum_v<T>>
struct ArithmeticOrUnderlyingEnum;
template <typename T>
struct ArithmeticOrUnderlyingEnum<T, true> {
using type = typename std::underlying_type<T>::type;
- static const bool value = std::is_arithmetic<type>::value;
+ static const bool value = std::is_arithmetic_v<type>;
};
template <typename T>
struct ArithmeticOrUnderlyingEnum<T, false> {
using type = T;
- static const bool value = std::is_arithmetic<type>::value;
+ static const bool value = std::is_arithmetic_v<type>;
};
// The following are helper templates used in the CheckedNumeric class.
@@ -605,7 +602,7 @@
template <typename T>
struct UnderlyingType {
using type = typename ArithmeticOrUnderlyingEnum<T>::type;
- static const bool is_numeric = std::is_arithmetic<type>::value;
+ static const bool is_numeric = std::is_arithmetic_v<type>;
static const bool is_checked = false;
static const bool is_clamped = false;
static const bool is_strict = false;
@@ -669,7 +666,7 @@
constexpr typename std::make_signed<
typename base::internal::UnderlyingType<Src>::type>::type
as_signed(const Src value) {
- static_assert(std::is_integral<decltype(as_signed(value))>::value,
+ static_assert(std::is_integral_v<decltype(as_signed(value))>,
"Argument must be a signed or unsigned integer type.");
return static_cast<decltype(as_signed(value))>(value);
}
@@ -681,7 +678,7 @@
constexpr typename std::make_unsigned<
typename base::internal::UnderlyingType<Src>::type>::type
as_unsigned(const Src value) {
- static_assert(std::is_integral<decltype(as_unsigned(value))>::value,
+ static_assert(std::is_integral_v<decltype(as_unsigned(value))>,
"Argument must be a signed or unsigned integer type.");
return static_cast<decltype(as_unsigned(value))>(value);
}
@@ -698,7 +695,7 @@
template <typename L, typename R>
struct IsLess {
- static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
@@ -718,7 +715,7 @@
template <typename L, typename R>
struct IsLessOrEqual {
- static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
@@ -738,7 +735,7 @@
template <typename L, typename R>
struct IsGreater {
- static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
@@ -758,7 +755,7 @@
template <typename L, typename R>
struct IsGreaterOrEqual {
- static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
@@ -768,7 +765,7 @@
template <typename L, typename R>
struct IsEqual {
- static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return DstRangeRelationToSrcRange<R>(lhs) ==
@@ -780,7 +777,7 @@
template <typename L, typename R>
struct IsNotEqual {
- static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
"Types must be numeric.");
static constexpr bool Test(const L lhs, const R rhs) {
return DstRangeRelationToSrcRange<R>(lhs) !=
@@ -794,7 +791,7 @@
// Binary arithmetic operations.
template <template <typename, typename> class C, typename L, typename R>
constexpr bool SafeCompare(const L lhs, const R rhs) {
- static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ static_assert(std::is_arithmetic_v<L> && std::is_arithmetic_v<R>,
"Types must be numeric.");
using Promotion = BigEnoughPromotion<L, R>;
using BigType = typename Promotion::type;
diff --git a/base/numerics/safe_math_clang_gcc_impl.h b/base/numerics/safe_math_clang_gcc_impl.h
index b45388c..c5a89d9 100644
--- a/base/numerics/safe_math_clang_gcc_impl.h
+++ b/base/numerics/safe_math_clang_gcc_impl.h
@@ -136,7 +136,7 @@
template <typename T>
struct ClampedNegFastOp {
- static const bool is_supported = std::is_signed<T>::value;
+ static const bool is_supported = std::is_signed_v<T>;
__attribute__((always_inline)) static T Do(T value) {
// Use this when there is no assembler path available.
if (!ClampedSubFastAsmOp<T, T>::is_supported) {
diff --git a/base/numerics/safe_math_shared_impl.h b/base/numerics/safe_math_shared_impl.h
index 7ba4ed7..80ba1ad 100644
--- a/base/numerics/safe_math_shared_impl.h
+++ b/base/numerics/safe_math_shared_impl.h
@@ -115,8 +115,8 @@
// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
// so the float versions will not compile.
template <typename Numeric,
- bool IsInteger = std::is_integral<Numeric>::value,
- bool IsFloat = std::is_floating_point<Numeric>::value>
+ bool IsInteger = std::is_integral_v<Numeric>,
+ bool IsFloat = std::is_floating_point_v<Numeric>>
struct UnsignedOrFloatForSize;
template <typename Numeric>
@@ -135,35 +135,33 @@
// if an overflow occurred.
template <typename T,
- typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+ typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
constexpr T NegateWrapper(T value) {
using UnsignedT = typename std::make_unsigned<T>::type;
// This will compile to a NEG on Intel, and is normal negation on ARM.
return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
}
-template <
- typename T,
- typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+template <typename T,
+ typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
constexpr T NegateWrapper(T value) {
return -value;
}
template <typename T,
- typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+ typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
return ~value;
}
template <typename T,
- typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+ typename std::enable_if<std::is_integral_v<T>>::type* = nullptr>
constexpr T AbsWrapper(T value) {
return static_cast<T>(SafeUnsignedAbs(value));
}
-template <
- typename T,
- typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+template <typename T,
+ typename std::enable_if<std::is_floating_point_v<T>>::type* = nullptr>
constexpr T AbsWrapper(T value) {
return value < 0 ? -value : value;
}
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index c816878..16fd71d 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -289,6 +289,10 @@
double last_energy_impact_;
// In mach_absolute_time units.
uint64_t last_energy_impact_time_;
+
+ // Works around a race condition when combining two task_info() calls to
+ // measure CPU time.
+ TimeDelta last_measured_cpu_;
#endif
#if BUILDFLAG(IS_MAC)
diff --git a/base/process/process_metrics_apple.cc b/base/process/process_metrics_apple.cc
index 66800b2..54dd5f2 100644
--- a/base/process/process_metrics_apple.cc
+++ b/base/process/process_metrics_apple.cc
@@ -128,7 +128,18 @@
timeradd(&user_timeval, &task_timeval, &task_timeval);
timeradd(&system_timeval, &task_timeval, &task_timeval);
- return Microseconds(TimeValToMicroseconds(task_timeval));
+ const TimeDelta measured_cpu =
+ Microseconds(TimeValToMicroseconds(task_timeval));
+ if (measured_cpu < last_measured_cpu_) {
+ // When a thread terminates, its CPU time is immediately removed from the
+ // running thread times returned by TASK_THREAD_TIMES_INFO, but there can be
+ // a lag before it shows up in the terminated thread times returned by
+ // GetTaskInfo(). Make sure CPU usage doesn't appear to go backwards if
+ // GetCumulativeCPUUsage() is called in the interval.
+ return last_measured_cpu_;
+ }
+ last_measured_cpu_ = measured_cpu;
+ return measured_cpu;
}
int ProcessMetrics::GetPackageIdleWakeupsPerSecond() {
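The new last_measured_cpu_ field implements a monotonicity clamp: a cumulative counter assembled from racy sources is never reported going backwards. A standalone sketch of the same pattern, with a generic sampler callback standing in for the two task_info() queries (names here are illustrative):

#include <utility>

#include "base/functional/callback.h"
#include "base/time/time.h"

// |sampler| stands in for GetCumulativeCPUUsage()'s underlying measurement,
// which can briefly dip while a terminated thread's time migrates from the
// running-thread accounting to the terminated-thread accounting.
class MonotonicCpuReader {
 public:
  explicit MonotonicCpuReader(
      base::RepeatingCallback<base::TimeDelta()> sampler)
      : sampler_(std::move(sampler)) {}

  base::TimeDelta GetCumulativeCpu() {
    const base::TimeDelta measured = sampler_.Run();
    if (measured < last_measured_) {
      // Racy dip: report the previous (higher) value; the next sample will
      // catch up once the terminated thread's time is fully accounted for.
      return last_measured_;
    }
    last_measured_ = measured;
    return measured;
  }

 private:
  base::RepeatingCallback<base::TimeDelta()> sampler_;
  base::TimeDelta last_measured_;  // starts at zero
};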
diff --git a/base/process/process_metrics_unittest.cc b/base/process/process_metrics_unittest.cc
index 0cfd07f..7af781b 100644
--- a/base/process/process_metrics_unittest.cc
+++ b/base/process/process_metrics_unittest.cc
@@ -39,15 +39,20 @@
#include "base/process/internal_linux.h"
#endif
-namespace base {
-namespace debug {
-
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) || \
- BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
+ BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_APPLE)
+#define ENABLE_CPU_TESTS 1
+#else
+#define ENABLE_CPU_TESTS 0
+#endif
+
+namespace base::debug {
namespace {
+#if ENABLE_CPU_TESTS
+
void BusyWork(std::vector<std::string>* vec) {
int64_t test_value = 0;
for (int i = 0; i < 100000; ++i) {
@@ -56,11 +61,38 @@
}
}
-} // namespace
+TimeDelta TestCumulativeCPU(ProcessMetrics* metrics, TimeDelta prev_cpu_usage) {
+ const TimeDelta current_cpu_usage = metrics->GetCumulativeCPUUsage();
+ EXPECT_GE(current_cpu_usage, prev_cpu_usage);
+ EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+ return current_cpu_usage;
+}
-#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
- // BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) ||
- // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
+TimeDelta TestPreciseCumulativeCPU(ProcessMetrics* metrics,
+ TimeDelta prev_cpu_usage) {
+#if BUILDFLAG(IS_WIN)
+ const TimeDelta current_cpu_usage = metrics->GetPreciseCumulativeCPUUsage();
+ EXPECT_GE(current_cpu_usage, prev_cpu_usage);
+ EXPECT_GE(metrics->GetPreciseCPUUsage(), 0.0);
+ return current_cpu_usage;
+#else
+ // Do nothing. Not supported on this platform.
+ return base::TimeDelta();
+#endif
+}
+
+#endif // ENABLE_CPU_TESTS
+
+std::unique_ptr<ProcessMetrics> CreateProcessMetricsForTest(
+ ProcessHandle handle) {
+#if BUILDFLAG(IS_MAC)
+ return ProcessMetrics::CreateProcessMetrics(handle, nullptr);
+#else
+ return ProcessMetrics::CreateProcessMetrics(handle);
+#endif
+}
+
+} // namespace
// Tests for SystemMetrics.
// Exists as a class so it can be a friend of SystemMetrics.
@@ -345,19 +377,19 @@
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
// BUILDFLAG(IS_ANDROID)
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
- BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) || \
- BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
-
+#if ENABLE_CPU_TESTS
// Test that ProcessMetrics::GetPlatformIndependentCPUUsage() doesn't return
// negative values when the number of threads running on the process decreases
// between two successive calls to it.
TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
ProcessHandle handle = GetCurrentProcessHandle();
- std::unique_ptr<ProcessMetrics> metrics(
- ProcessMetrics::CreateProcessMetrics(handle));
+ std::unique_ptr<ProcessMetrics> metrics(CreateProcessMetricsForTest(handle));
EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+#if BUILDFLAG(IS_WIN)
+ EXPECT_GE(metrics->GetPreciseCPUUsage(), 0.0);
+#endif
+
Thread thread1("thread1");
Thread thread2("thread2");
Thread thread3("thread3");
@@ -378,31 +410,26 @@
thread2.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec2));
thread3.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec3));
- TimeDelta prev_cpu_usage = metrics->GetCumulativeCPUUsage();
- EXPECT_GE(prev_cpu_usage, TimeDelta());
- EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+ TimeDelta prev_cpu_usage = TestCumulativeCPU(metrics.get(), TimeDelta());
+ TimeDelta prev_precise_cpu_usage =
+ TestPreciseCumulativeCPU(metrics.get(), TimeDelta());
thread1.Stop();
- TimeDelta current_cpu_usage = metrics->GetCumulativeCPUUsage();
- EXPECT_GE(current_cpu_usage, prev_cpu_usage);
- prev_cpu_usage = current_cpu_usage;
- EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+ prev_cpu_usage = TestCumulativeCPU(metrics.get(), prev_cpu_usage);
+ prev_precise_cpu_usage =
+ TestPreciseCumulativeCPU(metrics.get(), prev_precise_cpu_usage);
thread2.Stop();
- current_cpu_usage = metrics->GetCumulativeCPUUsage();
- EXPECT_GE(current_cpu_usage, prev_cpu_usage);
- prev_cpu_usage = current_cpu_usage;
- EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+ prev_cpu_usage = TestCumulativeCPU(metrics.get(), prev_cpu_usage);
+ prev_precise_cpu_usage =
+ TestPreciseCumulativeCPU(metrics.get(), prev_precise_cpu_usage);
thread3.Stop();
- current_cpu_usage = metrics->GetCumulativeCPUUsage();
- EXPECT_GE(current_cpu_usage, prev_cpu_usage);
- EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
+ prev_cpu_usage = TestCumulativeCPU(metrics.get(), prev_cpu_usage);
+ prev_precise_cpu_usage =
+ TestPreciseCumulativeCPU(metrics.get(), prev_precise_cpu_usage);
}
-
-#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
- // BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_WIN) ||
- // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
+#endif // ENABLE_CPU_TESTS
#if BUILDFLAG(IS_CHROMEOS)
TEST_F(SystemMetricsTest, ParseZramMmStat) {
@@ -493,7 +520,8 @@
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
TEST(ProcessMetricsTest, ParseProcStatCPU) {
// /proc/self/stat for a process running "top".
- const char kTopStat[] = "960 (top) S 16230 960 16230 34818 960 "
+ const char kTopStat[] =
+ "960 (top) S 16230 960 16230 34818 960 "
"4202496 471 0 0 0 "
"12 16 0 0 " // <- These are the goods.
"20 0 1 0 121946157 15077376 314 18446744073709551615 4194304 "
@@ -502,7 +530,8 @@
EXPECT_EQ(12 + 16, ParseProcStatCPU(kTopStat));
// cat /proc/self/stat on a random other machine I have.
- const char kSelfStat[] = "5364 (cat) R 5354 5364 5354 34819 5364 "
+ const char kSelfStat[] =
+ "5364 (cat) R 5354 5364 5354 34819 5364 "
"0 142 0 0 0 "
"0 0 0 0 " // <- No CPU, apparently.
"16 0 1 0 1676099790 2957312 114 4294967295 134512640 134528148 "
@@ -512,7 +541,8 @@
// Some weird long-running process with a weird name that I created for the
// purposes of this test.
- const char kWeirdNameStat[] = "26115 (Hello) You ())) ) R 24614 26115 24614"
+ const char kWeirdNameStat[] =
+ "26115 (Hello) You ())) ) R 24614 26115 24614"
" 34839 26115 4218880 227 0 0 0 "
"5186 11 0 0 "
"20 0 1 0 36933953 4296704 90 18446744073709551615 4194304 4196116 "
@@ -581,8 +611,9 @@
// Busy-wait for an event to be signaled.
void WaitForEvent(const FilePath& signal_dir, const char* signal_file) {
- while (!CheckEvent(signal_dir, signal_file))
+ while (!CheckEvent(signal_dir, signal_file)) {
PlatformThread::Sleep(Milliseconds(10));
+ }
}
// Subprocess to test the number of open file descriptors.
@@ -608,8 +639,9 @@
CHECK(SignalEvent(temp_path, kSignalClosed));
// Wait to be terminated.
- while (true)
+ while (true) {
PlatformThread::Sleep(Seconds(1));
+ }
}
} // namespace
@@ -629,11 +661,7 @@
WaitForEvent(temp_path, kSignalReady);
std::unique_ptr<ProcessMetrics> metrics =
-#if BUILDFLAG(IS_MAC)
- ProcessMetrics::CreateProcessMetrics(child.Handle(), nullptr);
-#else
- ProcessMetrics::CreateProcessMetrics(child.Handle());
-#endif // BUILDFLAG(IS_MAC)
+ CreateProcessMetricsForTest(child.Handle());
const int fd_count = metrics->GetOpenFdCount();
EXPECT_GE(fd_count, 0);
@@ -654,11 +682,7 @@
TEST(ProcessMetricsTest, GetOpenFdCount) {
base::ProcessHandle process = base::GetCurrentProcessHandle();
std::unique_ptr<base::ProcessMetrics> metrics =
-#if BUILDFLAG(IS_MAC)
- ProcessMetrics::CreateProcessMetrics(process, nullptr);
-#else
- ProcessMetrics::CreateProcessMetrics(process);
-#endif // BUILDFLAG(IS_MAC)
+ CreateProcessMetricsForTest(process);
ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
@@ -671,7 +695,6 @@
EXPECT_GT(new_fd_count, 0);
EXPECT_EQ(new_fd_count, fd_count + 1);
}
-
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_APPLE)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
@@ -761,12 +784,12 @@
return entry.first == prev_entry.first;
});
- if (prev_it != prev_thread_times.end())
+ if (prev_it != prev_thread_times.end()) {
EXPECT_GE(entry.second, prev_it->second);
+ }
}
}
#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS)
-} // namespace debug
-} // namespace base
+} // namespace base::debug
diff --git a/base/strings/string_piece.h b/base/strings/string_piece.h
index 3270c04..8df6862 100644
--- a/base/strings/string_piece.h
+++ b/base/strings/string_piece.h
@@ -13,13 +13,8 @@
// Many files including this header rely on these being included due to IWYU
// violations. Preserve the includes for now. As code is migrated away from this
// header, we can incrementally fix the IWYU violations.
-#include "base/base_export.h"
#include "base/check.h"
#include "base/check_op.h"
-#include "base/compiler_specific.h"
-#include "base/cxx20_is_constant_evaluated.h"
#include "base/strings/string_piece_forward.h"
-#include "base/strings/utf_ostream_operators.h"
-#include "build/build_config.h"
#endif // BASE_STRINGS_STRING_PIECE_H_
diff --git a/base/strings/string_util.h b/base/strings/string_util.h
index 1279ea5..bee110b 100644
--- a/base/strings/string_util.h
+++ b/base/strings/string_util.h
@@ -115,7 +115,7 @@
// ASCII-specific tolower. The standard library's tolower is locale sensitive,
// so we don't want to use it here.
template <typename CharT,
- typename = std::enable_if_t<std::is_integral<CharT>::value>>
+ typename = std::enable_if_t<std::is_integral_v<CharT>>>
constexpr CharT ToLowerASCII(CharT c) {
return internal::ToLowerASCII(c);
}
@@ -123,7 +123,7 @@
// ASCII-specific toupper. The standard library's toupper is locale sensitive,
// so we don't want to use it here.
template <typename CharT,
- typename = std::enable_if_t<std::is_integral<CharT>::value>>
+ typename = std::enable_if_t<std::is_integral_v<CharT>>>
CharT ToUpperASCII(CharT c) {
return (c >= 'a' && c <= 'z') ? static_cast<CharT>(c + 'A' - 'a') : c;
}
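A tiny sketch of the locale-independent case helpers whose SFINAE constraints were updated above (ToLowerASCII() is constexpr, so the mapping can be checked at compile time):

#include "base/strings/string_util.h"

static_assert(base::ToLowerASCII('A') == 'a', "");
static_assert(base::ToLowerASCII('7') == '7', "");  // non-letters pass through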
diff --git a/base/strings/string_util_internal.h b/base/strings/string_util_internal.h
index 978088f..25dbc41 100644
--- a/base/strings/string_util_internal.h
+++ b/base/strings/string_util_internal.h
@@ -15,7 +15,7 @@
// ASCII-specific tolower. The standard library's tolower is locale sensitive,
// so we don't want to use it here.
template <typename CharT,
- typename = std::enable_if_t<std::is_integral<CharT>::value>>
+ typename = std::enable_if_t<std::is_integral_v<CharT>>>
constexpr CharT ToLowerASCII(CharT c) {
return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
}
diff --git a/base/strings/string_util_unittest.cc b/base/strings/string_util_unittest.cc
index 4bb22e5..b4c797f 100644
--- a/base/strings/string_util_unittest.cc
+++ b/base/strings/string_util_unittest.cc
@@ -355,59 +355,53 @@
TEST(StringUtilTest, as_wcstr) {
char16_t rw_buffer[10] = {};
static_assert(
- std::is_same<wchar_t*, decltype(as_writable_wcstr(rw_buffer))>::value,
- "");
+ std::is_same_v<wchar_t*, decltype(as_writable_wcstr(rw_buffer))>, "");
EXPECT_EQ(static_cast<void*>(rw_buffer), as_writable_wcstr(rw_buffer));
std::u16string rw_str(10, '\0');
- static_assert(
- std::is_same<wchar_t*, decltype(as_writable_wcstr(rw_str))>::value, "");
+ static_assert(std::is_same_v<wchar_t*, decltype(as_writable_wcstr(rw_str))>,
+ "");
EXPECT_EQ(static_cast<const void*>(rw_str.data()), as_writable_wcstr(rw_str));
const char16_t ro_buffer[10] = {};
- static_assert(
- std::is_same<const wchar_t*, decltype(as_wcstr(ro_buffer))>::value, "");
+ static_assert(std::is_same_v<const wchar_t*, decltype(as_wcstr(ro_buffer))>,
+ "");
EXPECT_EQ(static_cast<const void*>(ro_buffer), as_wcstr(ro_buffer));
const std::u16string ro_str(10, '\0');
- static_assert(std::is_same<const wchar_t*, decltype(as_wcstr(ro_str))>::value,
- "");
+ static_assert(std::is_same_v<const wchar_t*, decltype(as_wcstr(ro_str))>, "");
EXPECT_EQ(static_cast<const void*>(ro_str.data()), as_wcstr(ro_str));
StringPiece16 piece = ro_buffer;
- static_assert(std::is_same<const wchar_t*, decltype(as_wcstr(piece))>::value,
- "");
+ static_assert(std::is_same_v<const wchar_t*, decltype(as_wcstr(piece))>, "");
EXPECT_EQ(static_cast<const void*>(piece.data()), as_wcstr(piece));
}
TEST(StringUtilTest, as_u16cstr) {
wchar_t rw_buffer[10] = {};
static_assert(
- std::is_same<char16_t*, decltype(as_writable_u16cstr(rw_buffer))>::value,
- "");
+ std::is_same_v<char16_t*, decltype(as_writable_u16cstr(rw_buffer))>, "");
EXPECT_EQ(static_cast<void*>(rw_buffer), as_writable_u16cstr(rw_buffer));
std::wstring rw_str(10, '\0');
static_assert(
- std::is_same<char16_t*, decltype(as_writable_u16cstr(rw_str))>::value,
- "");
+ std::is_same_v<char16_t*, decltype(as_writable_u16cstr(rw_str))>, "");
EXPECT_EQ(static_cast<const void*>(rw_str.data()),
as_writable_u16cstr(rw_str));
const wchar_t ro_buffer[10] = {};
static_assert(
- std::is_same<const char16_t*, decltype(as_u16cstr(ro_buffer))>::value,
- "");
+ std::is_same_v<const char16_t*, decltype(as_u16cstr(ro_buffer))>, "");
EXPECT_EQ(static_cast<const void*>(ro_buffer), as_u16cstr(ro_buffer));
const std::wstring ro_str(10, '\0');
- static_assert(
- std::is_same<const char16_t*, decltype(as_u16cstr(ro_str))>::value, "");
+ static_assert(std::is_same_v<const char16_t*, decltype(as_u16cstr(ro_str))>,
+ "");
EXPECT_EQ(static_cast<const void*>(ro_str.data()), as_u16cstr(ro_str));
WStringPiece piece = ro_buffer;
- static_assert(
- std::is_same<const char16_t*, decltype(as_u16cstr(piece))>::value, "");
+ static_assert(std::is_same_v<const char16_t*, decltype(as_u16cstr(piece))>,
+ "");
EXPECT_EQ(static_cast<const void*>(piece.data()), as_u16cstr(piece));
}
#endif // defined(WCHAR_T_IS_UTF16)
diff --git a/base/strings/utf_string_conversions.cc b/base/strings/utf_string_conversions.cc
index eca48f0..fa95913 100644
--- a/base/strings/utf_string_conversions.cc
+++ b/base/strings/utf_string_conversions.cc
@@ -12,6 +12,7 @@
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
+#include "base/strings/utf_ostream_operators.h"
#include "base/strings/utf_string_conversion_utils.h"
#include "base/third_party/icu/icu_utf.h"
#include "build/build_config.h"
@@ -66,9 +67,9 @@
// Convenience typedef that checks whether the passed in type is integral (i.e.
// bool, char, int or their extended versions) and is of the correct size.
template <typename Char, size_t N>
-using EnableIfBitsAre = std::enable_if_t<std::is_integral<Char>::value &&
- CHAR_BIT * sizeof(Char) == N,
- bool>;
+using EnableIfBitsAre =
+ std::enable_if_t<std::is_integral_v<Char> && CHAR_BIT * sizeof(Char) == N,
+ bool>;
template <typename Char, EnableIfBitsAre<Char, 8> = true>
void UnicodeAppendUnsafe(Char* out,
diff --git a/base/system/sys_info.cc b/base/system/sys_info.cc
index 1407922..4f0f967 100644
--- a/base/system/sys_info.cc
+++ b/base/system/sys_info.cc
@@ -84,7 +84,7 @@
return IsLowEndDeviceImpl();
}
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
namespace {
@@ -96,12 +96,13 @@
k8GbOrHigher,
};
-BucketizedSize GetAndroidSystemRamBucketizedSize() {
+BucketizedSize GetSystemRamBucketizedSize() {
int physical_memory = base::SysInfo::AmountOfPhysicalMemoryMB();
// Because of Android carveouts, AmountOfPhysicalMemory() returns a smaller
// value than the actual memory size, so we use a lower bound below "X"GB to
// distinguish real "X"GB devices from lower-memory ones.
+ // Addendum: This logic should also work for ChromeOS.
constexpr int kUpperBound2GB = 2 * 1024; // inclusive
if (physical_memory <= kUpperBound2GB) {
@@ -129,46 +130,46 @@
return BucketizedSize::k8GbOrHigher;
}
-BucketizedSize GetCachedAndroidSystemRamBucketizedSize() {
- static BucketizedSize s_size = GetAndroidSystemRamBucketizedSize();
+BucketizedSize GetCachedSystemRamBucketizedSize() {
+ static BucketizedSize s_size = GetSystemRamBucketizedSize();
return s_size;
}
bool IsPartialLowEndModeOnMidRangeDevicesEnabled() {
// TODO(crbug.com/1434873): make sure the feature is not enabled on 32-bit
// devices before launching or going to a high Stable %.
- return SysInfo::IsAndroid4GbOr6GbDevice() &&
+ return SysInfo::Is4GbOr6GbDevice() &&
base::FeatureList::IsEnabled(
features::kPartialLowEndModeOnMidRangeDevices);
}
} // namespace
-bool SysInfo::IsAndroid3GbDevice() {
- return GetCachedAndroidSystemRamBucketizedSize() == BucketizedSize::k3Gb;
+bool SysInfo::Is3GbDevice() {
+ return GetCachedSystemRamBucketizedSize() == BucketizedSize::k3Gb;
}
-bool SysInfo::IsAndroid4GbDevice() {
- return GetCachedAndroidSystemRamBucketizedSize() == BucketizedSize::k4Gb;
+bool SysInfo::Is4GbDevice() {
+ return GetCachedSystemRamBucketizedSize() == BucketizedSize::k4Gb;
}
-bool SysInfo::IsAndroid4GbOr6GbDevice() {
- return GetCachedAndroidSystemRamBucketizedSize() == BucketizedSize::k4Gb ||
- GetCachedAndroidSystemRamBucketizedSize() == BucketizedSize::k6Gb;
+bool SysInfo::Is4GbOr6GbDevice() {
+ return GetCachedSystemRamBucketizedSize() == BucketizedSize::k4Gb ||
+ GetCachedSystemRamBucketizedSize() == BucketizedSize::k6Gb;
}
-bool SysInfo::IsAndroid6GbDevice() {
- return GetCachedAndroidSystemRamBucketizedSize() == BucketizedSize::k6Gb;
+bool SysInfo::Is6GbDevice() {
+ return GetCachedSystemRamBucketizedSize() == BucketizedSize::k6Gb;
}
-#endif // BUILDFLAG(IS_ANDROID)
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// TODO(crbug.com/1434873): This method is for chromium native code.
// We need to update the java-side code, i.e.
// base/android/java/src/org/chromium/base/SysUtils.java,
// and make the selected components in Java aware of this feature.
bool SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled() {
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
return base::SysInfo::IsLowEndDevice() ||
IsPartialLowEndModeOnMidRangeDevicesEnabled();
#else
@@ -178,7 +179,7 @@
bool SysInfo::IsLowEndDeviceOrPartialLowEndModeEnabled(
const FeatureParam<bool>& param_for_exclusion) {
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
return base::SysInfo::IsLowEndDevice() ||
(IsPartialLowEndModeOnMidRangeDevicesEnabled() &&
!param_for_exclusion.Get());
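The renamed GetSystemRamBucketizedSize() maps AmountOfPhysicalMemoryMB() onto coarse size classes, with cutoffs chosen so that carveout-reduced readings still classify correctly. A simplified, self-contained sketch of that kind of mapping; the thresholds here are illustrative, not the real cutoffs:

enum class RamBucket { kUnder3Gb, k3Gb, k4Gb, k6Gb, k8GbOrHigher };

// Illustrative mapping only; the real cutoffs live in
// GetSystemRamBucketizedSize(). Using each nominal size as an inclusive
// upper bound means a 4 GB device that reports ~3.7 GB after carveouts
// still lands in the 4 GB bucket.
RamBucket BucketizeRamMb(int physical_memory_mb) {
  if (physical_memory_mb <= 2 * 1024) {
    return RamBucket::kUnder3Gb;
  }
  if (physical_memory_mb <= 3 * 1024) {  // (2 GB, 3 GB]
    return RamBucket::k3Gb;
  }
  if (physical_memory_mb <= 4 * 1024) {  // (3 GB, 4 GB]
    return RamBucket::k4Gb;
  }
  if (physical_memory_mb <= 6 * 1024) {  // (4 GB, 6 GB]
    return RamBucket::k6Gb;
  }
  return RamBucket::k8GbOrHigher;
}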
diff --git a/base/system/sys_info.h b/base/system/sys_info.h
index b07d4c9..26944d4 100644
--- a/base/system/sys_info.h
+++ b/base/system/sys_info.h
@@ -297,18 +297,19 @@
static bool IsLowEndDeviceOrPartialLowEndModeEnabled(
const FeatureParam<bool>& param_for_exclusion);
-#if BUILDFLAG(IS_ANDROID)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
// Returns true for Android devices whose memory is X GB, considering
// carveouts. Carveouts are memory reserved by the system, e.g. for drivers,
// MTE, etc. It's very common for a querying app to see hundreds of MBs less
// than the actual physical memory installed on the system.
- static bool IsAndroid3GbDevice();
- static bool IsAndroid4GbDevice();
- static bool IsAndroid6GbDevice();
+ // Addendum: This logic should also work for ChromeOS.
+ static bool Is3GbDevice();
+ static bool Is4GbDevice();
+ static bool Is6GbDevice();
// Returns true for Android devices whose memory is 4GB or 6GB, considering
// carveouts.
- static bool IsAndroid4GbOr6GbDevice();
-#endif // BUILDFLAG(IS_ANDROID)
+ static bool Is4GbOr6GbDevice();
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_CHROMEOS)
#if BUILDFLAG(IS_MAC)
// Indicates that CPU security mitigations are enabled for the current
diff --git a/base/task/bind_post_task.h b/base/task/bind_post_task.h
index 77fafbb..cc3b8ad 100644
--- a/base/task/bind_post_task.h
+++ b/base/task/bind_post_task.h
@@ -68,7 +68,7 @@
scoped_refptr<TaskRunner> task_runner,
OnceCallback<ReturnType(Args...)> callback,
const Location& location = FROM_HERE) {
- static_assert(std::is_same<ReturnType, void>::value,
+ static_assert(std::is_same_v<ReturnType, void>,
"OnceCallback must have void return type in order to produce a "
"closure for PostTask(). Use base::IgnoreResult() to drop the "
"return value if desired.");
@@ -89,7 +89,7 @@
scoped_refptr<TaskRunner> task_runner,
RepeatingCallback<ReturnType(Args...)> callback,
const Location& location = FROM_HERE) {
- static_assert(std::is_same<ReturnType, void>::value,
+ static_assert(std::is_same_v<ReturnType, void>,
"RepeatingCallback must have void return type in order to "
"produce a closure for PostTask(). Use base::IgnoreResult() to "
"drop the return value if desired.");
diff --git a/base/task/bind_post_task_unittest.nc b/base/task/bind_post_task_unittest.nc
index ce74248..239a752 100644
--- a/base/task/bind_post_task_unittest.nc
+++ b/base/task/bind_post_task_unittest.nc
@@ -17,7 +17,7 @@
return 5;
}
-#if defined(NCTEST_ONCE_NON_VOID_RETURN_BIND_POST_TASK) // [r"fatal error: static assertion failed due to requirement 'std::is_same<int, void>::value': OnceCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
+#if defined(NCTEST_ONCE_NON_VOID_RETURN_BIND_POST_TASK) // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<int, void>': OnceCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
// OnceCallback with non-void return type.
void WontCompile() {
OnceCallback<int()> cb = BindOnce(&ReturnInt);
@@ -25,7 +25,7 @@
std::move(post_cb).Run();
}
-#elif defined(NCTEST_REPEATING_NON_VOID_RETURN_BIND_POST_TASK) // [r"fatal error: static assertion failed due to requirement 'std::is_same<int, void>::value': RepeatingCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
+#elif defined(NCTEST_REPEATING_NON_VOID_RETURN_BIND_POST_TASK) // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<int, void>': RepeatingCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
// RepeatingCallback with non-void return type.
void WontCompile() {
RepeatingCallback<int()> cb = BindRepeating(&ReturnInt);
@@ -33,7 +33,7 @@
std::move(post_cb).Run();
}
-#elif defined(NCTEST_ONCE_NON_VOID_RETURN_BIND_POST_TASK_TO_CURRENT_DEFAULT) // [r"fatal error: static assertion failed due to requirement 'std::is_same<int, void>::value': OnceCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
+#elif defined(NCTEST_ONCE_NON_VOID_RETURN_BIND_POST_TASK_TO_CURRENT_DEFAULT) // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<int, void>': OnceCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
// OnceCallback with non-void return type.
void WontCompile() {
OnceCallback<int()> cb = BindOnce(&ReturnInt);
@@ -41,7 +41,7 @@
std::move(post_cb).Run();
}
-#elif defined(NCTEST_REPEATING_NON_VOID_RETURN_BIND_POST_TASK_TO_CURRENT_DEFAULT) // [r"fatal error: static assertion failed due to requirement 'std::is_same<int, void>::value': RepeatingCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
+#elif defined(NCTEST_REPEATING_NON_VOID_RETURN_BIND_POST_TASK_TO_CURRENT_DEFAULT) // [r"fatal error: static assertion failed due to requirement 'std::is_same_v<int, void>': RepeatingCallback must have void return type in order to produce a closure for PostTask\(\). Use base::IgnoreResult\(\) to drop the return value if desired."]
// RepeatingCallback with non-void return type.
void WontCompile() {
RepeatingCallback<int()> cb = BindRepeating(&ReturnInt);
diff --git a/base/task/current_thread.h b/base/task/current_thread.h
index ef2c642..43da7a5 100644
--- a/base/task/current_thread.h
+++ b/base/task/current_thread.h
@@ -215,7 +215,7 @@
#if BUILDFLAG(IS_OZONE) && !BUILDFLAG(IS_FUCHSIA) && !BUILDFLAG(IS_WIN)
static_assert(
- std::is_base_of<WatchableIOMessagePumpPosix, MessagePumpForUI>::value,
+ std::is_base_of_v<WatchableIOMessagePumpPosix, MessagePumpForUI>,
"CurrentThreadForUI::WatchFileDescriptor is supported only"
"by MessagePumpLibevent and MessagePumpGlib implementations.");
bool WatchFileDescriptor(int fd,
diff --git a/base/task/sequence_manager/tasks.cc b/base/task/sequence_manager/tasks.cc
index 79fdb2f..5bf10c7 100644
--- a/base/task/sequence_manager/tasks.cc
+++ b/base/task/sequence_manager/tasks.cc
@@ -39,7 +39,7 @@
// and it may wrap around to a negative number during the static cast, hence,
// TaskQueueImpl::DelayedIncomingQueue is especially sensitive to a potential
// change of |PendingTask::sequence_num|'s type.
- static_assert(std::is_same<decltype(sequence_num), int>::value, "");
+ static_assert(std::is_same_v<decltype(sequence_num), int>, "");
sequence_num = static_cast<int>(sequence_order);
this->is_high_res = resolution == WakeUpResolution::kHigh;
}
diff --git a/base/task/thread_pool/thread_group_impl.cc b/base/task/thread_pool/thread_group_impl.cc
index ff3831b..5ed5e8c 100644
--- a/base/task/thread_pool/thread_group_impl.cc
+++ b/base/task/thread_pool/thread_group_impl.cc
@@ -984,9 +984,9 @@
DCHECK_LT(workers_.size(), kMaxNumberOfWorkers);
DCHECK(idle_workers_set_.IsEmpty());
- // WorkerThread needs |lock_| as a predecessor for its thread lock
- // because in WakeUpOneWorker, |lock_| is first acquired and then
- // the thread lock is acquired when WakeUp is called on the worker.
+ // WorkerThread needs |lock_| as a predecessor for its thread lock because in
+ // GetWork(), |lock_| is first acquired and then the thread lock is acquired
+ // when GetLastUsedTime() is called on the worker by CanGetWorkLockRequired().
scoped_refptr<WorkerThread> worker = MakeRefCounted<WorkerThread>(
thread_type_hint_,
std::make_unique<WorkerThreadDelegateImpl>(
diff --git a/base/task/thread_pool/thread_group_impl.h b/base/task/thread_pool/thread_group_impl.h
index 9adba0f..4c6de67 100644
--- a/base/task/thread_pool/thread_group_impl.h
+++ b/base/task/thread_pool/thread_group_impl.h
@@ -89,9 +89,9 @@
ThreadGroupImpl(const ThreadGroupImpl&) = delete;
ThreadGroupImpl& operator=(const ThreadGroupImpl&) = delete;
- // Destroying a ThreadGroupImpl returned by Create() is not allowed in
- // production; it is always leaked. In tests, it can only be destroyed after
- // JoinForTesting() has returned.
+ // Destroying a ThreadGroupImpl is not allowed in production; it is always
+ // leaked. In tests, it can only be destroyed after JoinForTesting() has
+ // returned.
~ThreadGroupImpl() override;
// ThreadGroup:
@@ -155,9 +155,6 @@
void MaintainAtLeastOneIdleWorkerLockRequired(
ScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);
- // Returns true if worker cleanup is permitted.
- bool CanWorkerCleanupForTestingLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
// Creates a worker, adds it to the thread group, schedules its start and
// returns it. Cannot be called before Start().
scoped_refptr<WorkerThread> CreateAndRegisterWorkerLockRequired(
diff --git a/base/test/android/junit/src/org/chromium/base/test/ShadowBuildInfo.java b/base/test/android/junit/src/org/chromium/base/test/ShadowBuildInfo.java
index b831435..93848c3 100644
--- a/base/test/android/junit/src/org/chromium/base/test/ShadowBuildInfo.java
+++ b/base/test/android/junit/src/org/chromium/base/test/ShadowBuildInfo.java
@@ -13,33 +13,20 @@
/** Shadow class of {@link BuildInfo} */
@Implements(BuildInfo.class)
public class ShadowBuildInfo {
- private static boolean sIsAtLeastT;
private static boolean sTargetsAtLeastT;
/** Resets the changes made to static state. */
@Resetter
public static void reset() {
- sIsAtLeastT = false;
sTargetsAtLeastT = false;
}
- /** Whether the current build is considered to be at least T. */
- @Implementation
- public static boolean isAtLeastT() {
- return sIsAtLeastT;
- }
-
/** Whether the current build is targeting at least T. */
@Implementation
public static boolean targetsAtLeastT() {
return sTargetsAtLeastT;
}
- /** Sets whether current Android version is at least T. */
- public static void setIsAtLeastT(boolean isAtLeastT) {
- sIsAtLeastT = isAtLeastT;
- }
-
/** Sets whether the current build is targeting at least T. */
public static void setTargetsAtLeastT(boolean targetsAtLeastT) {
sTargetsAtLeastT = targetsAtLeastT;
diff --git a/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java b/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java
index 18633ae..e75aa41 100644
--- a/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java
+++ b/base/test/android/junit/src/org/chromium/base/test/TestListInstrumentationRunListenerTest.java
@@ -36,13 +36,11 @@
private static class ChildClass extends ParentClass {}
private static class Groups {
- // clang-format off
@ParameterizedCommandLineFlags({
@Switches({"c1", "c2"}),
@Switches({"c3", "c4"}),
})
public void testA() {}
- // clang-format on
@ParameterizedCommandLineFlags
public void testB() {}
}
@@ -61,14 +59,12 @@
ParentClass.class, "testA",
ParentClass.class.getMethod("testA").getAnnotations());
JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
- // clang-format off
String expectedJsonString = makeJSON(
"{",
" 'method': 'testA',",
" 'annotations': {}",
"}"
);
- // clang-format on
Assert.assertEquals(expectedJsonString, json.toString());
}
@@ -78,7 +74,6 @@
ParentClass.class, "testB",
ParentClass.class.getMethod("testB").getAnnotations());
JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
- // clang-format off
String expectedJsonString = makeJSON(
"{",
" 'method': 'testB',",
@@ -89,7 +84,6 @@
" }",
"}"
);
- // clang-format on
Assert.assertEquals(expectedJsonString, json.toString());
}
@@ -100,7 +94,6 @@
ChildClass.class, "testB",
ChildClass.class.getMethod("testB").getAnnotations());
JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
- // clang-format off
String expectedJsonString = makeJSON(
"{",
" 'method': 'testB',",
@@ -111,7 +104,6 @@
" }",
"}"
);
- // clang-format on
Assert.assertEquals(expectedJsonString, json.toString());
}
@@ -119,7 +111,6 @@
public void testGetAnnotationJSONForParentClass() throws Throwable {
JSONObject json = TestListInstrumentationRunListener.getAnnotationJSON(
Arrays.asList(ParentClass.class.getAnnotations()));
- // clang-format off
String expectedJsonString = makeJSON(
"{",
" 'CommandLineFlags$Add': {",
@@ -127,7 +118,6 @@
" }",
"}"
);
- // clang-format on
Assert.assertEquals(expectedJsonString, json.toString());
}
@@ -135,7 +125,6 @@
public void testGetAnnotationJSONForChildClass() throws Throwable {
JSONObject json = TestListInstrumentationRunListener.getAnnotationJSON(
Arrays.asList(ChildClass.class.getAnnotations()));
- // clang-format off
String expectedJsonString = makeJSON(
"{",
" 'CommandLineFlags$Add': {",
@@ -146,7 +135,6 @@
" }",
"}"
);
- // clang-format on
Assert.assertEquals(expectedJsonString, json.toString());
}
@@ -155,7 +143,6 @@
Description desc = Description.createTestDescription(
Groups.class, "testA", Groups.class.getMethod("testA").getAnnotations());
JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
- // clang-format off
String expectedJsonString = makeJSON(
"{",
" 'method': 'testA',",
@@ -177,7 +164,6 @@
" }",
"}"
);
- // clang-format on
Assert.assertEquals(expectedJsonString, json.toString());
}
@@ -186,7 +172,6 @@
Description desc = Description.createTestDescription(
Groups.class, "testB", Groups.class.getMethod("testB").getAnnotations());
JSONObject json = TestListInstrumentationRunListener.getTestMethodJSON(desc);
- // clang-format off
String expectedJsonString = makeJSON(
"{",
" 'method': 'testB',",
@@ -197,7 +182,6 @@
" }",
"}"
);
- // clang-format on
Assert.assertEquals(expectedJsonString, json.toString());
}
}
diff --git a/base/test/gmock_callback_support.h b/base/test/gmock_callback_support.h
index fb99024..b69b55f 100644
--- a/base/test/gmock_callback_support.h
+++ b/base/test/gmock_callback_support.h
@@ -52,7 +52,7 @@
// copy, allowing it to be used multiple times.
template <size_t I,
typename Tuple,
- std::enable_if_t<std::is_copy_constructible<Tuple>::value, int> = 0>
+ std::enable_if_t<std::is_copy_constructible_v<Tuple>, int> = 0>
auto RunOnceCallbackImpl(Tuple&& tuple) {
return
[tuple = std::forward<Tuple>(tuple)](auto&&... args) -> decltype(auto) {
@@ -67,7 +67,7 @@
// callback by move, allowing it to be only used once.
template <size_t I,
typename Tuple,
- std::enable_if_t<!std::is_copy_constructible<Tuple>::value, int> = 0>
+ std::enable_if_t<!std::is_copy_constructible_v<Tuple>, int> = 0>
auto RunOnceCallbackImpl(Tuple&& tuple) {
// Mock actions need to be copyable, but `tuple` is not. Wrap it in a
// `scoped_refptr` to allow it to be copied.
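The two overloads above are selected by std::enable_if_t on the copy-constructibility of the bound argument tuple: copyable tuples are captured by copy (reusable action), move-only tuples take the scoped_refptr-wrapping path (single use). A generic sketch of the same overload-selection pattern (names are made up, not the real gMock helpers):

    #include <memory>
    #include <type_traits>
    #include <utility>

    // Enabled only when T is copy-constructible.
    template <typename T,
              std::enable_if_t<std::is_copy_constructible_v<T>, int> = 0>
    const char* Describe(T&&) { return "copyable: captured by copy"; }

    // Enabled only when T is move-only.
    template <typename T,
              std::enable_if_t<!std::is_copy_constructible_v<T>, int> = 0>
    const char* Describe(T&&) { return "move-only: wrapped and moved"; }

    int main() {
      int copyable = 42;
      auto move_only = std::make_unique<int>(42);
      Describe(copyable);              // Picks the copy-constructible overload.
      Describe(std::move(move_only));  // Picks the move-only overload.
    }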
diff --git a/base/threading/hang_watcher.h b/base/threading/hang_watcher.h
index dad6f55..85b8e71 100644
--- a/base/threading/hang_watcher.h
+++ b/base/threading/hang_watcher.h
@@ -499,7 +499,7 @@
using TimeTicksInternalRepresentation =
std::invoke_result<decltype(&TimeTicks::ToInternalValue),
TimeTicks>::type;
- static_assert(std::is_same<TimeTicksInternalRepresentation, int64_t>::value,
+ static_assert(std::is_same_v<TimeTicksInternalRepresentation, int64_t>,
"Bit manipulations made by HangWatchDeadline need to be"
"adapted if internal representation of TimeTicks changes.");
@@ -535,7 +535,7 @@
// necessary to run the proper checks to ensure correctness of the conversion
// that has to go through int64_t. (See DeadlineFromBits()).
using BitsType = uint64_t;
- static_assert(std::is_same<std::underlying_type<Flag>::type, BitsType>::value,
+ static_assert(std::is_same_v<std::underlying_type<Flag>::type, BitsType>,
"Flag should have the same underlying type as bits_ to "
"simplify thinking about bit operations");
diff --git a/base/threading/post_task_and_reply_impl_unittest.cc b/base/threading/post_task_and_reply_impl_unittest.cc
index 05d3e77..024694d 100644
--- a/base/threading/post_task_and_reply_impl_unittest.cc
+++ b/base/threading/post_task_and_reply_impl_unittest.cc
@@ -11,6 +11,7 @@
#include "base/functional/callback_helpers.h"
#include "base/memory/raw_ptr.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/weak_ptr.h"
#include "base/task/sequenced_task_runner.h"
#include "base/test/test_mock_time_task_runner.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -24,8 +25,8 @@
class ObjectToDelete : public RefCounted<ObjectToDelete> {
public:
- // |delete_flag| is set to true when this object is deleted
- ObjectToDelete(bool* delete_flag) : delete_flag_(delete_flag) {
+ // `delete_flag` is set to true when this object is deleted
+ explicit ObjectToDelete(bool* delete_flag) : delete_flag_(delete_flag) {
EXPECT_FALSE(*delete_flag_);
}
@@ -48,6 +49,13 @@
MOCK_METHOD1(Task, void(scoped_refptr<ObjectToDelete>));
MOCK_METHOD1(Reply, void(scoped_refptr<ObjectToDelete>));
+
+ WeakPtr<MockObject> GetWeakPtr() { return weak_factory_.GetWeakPtr(); }
+
+ void InvalidateWeakPtrs() { weak_factory_.InvalidateWeakPtrs(); }
+
+ private:
+ WeakPtrFactory<MockObject> weak_factory_{this};
};
class MockRunsTasksInCurrentSequenceTaskRunner : public TestMockTimeTaskRunner {
@@ -104,23 +112,31 @@
protected:
PostTaskAndReplyImplTest() = default;
- bool PostTaskAndReplyToMockObject() {
+ bool PostTaskAndReplyToMockObject(bool task_uses_weak_ptr = false) {
+ OnceClosure task;
+ if (task_uses_weak_ptr) {
+ task = BindOnce(&MockObject::Task, mock_object_.GetWeakPtr(),
+ MakeRefCounted<ObjectToDelete>(&delete_task_flag_));
+ } else {
+ task = BindOnce(&MockObject::Task, Unretained(&mock_object_),
+ MakeRefCounted<ObjectToDelete>(&delete_task_flag_));
+ }
+
return PostTaskAndReplyImpl(
[this](const Location& location, OnceClosure task) {
return post_runner_->PostTask(location, std::move(task));
},
- FROM_HERE,
- BindOnce(&MockObject::Task, Unretained(&mock_object_),
- MakeRefCounted<ObjectToDelete>(&delete_task_flag_)),
+ FROM_HERE, std::move(task),
BindOnce(&MockObject::Reply, Unretained(&mock_object_),
MakeRefCounted<ObjectToDelete>(&delete_reply_flag_)));
}
- void ExpectPostTaskAndReplyToMockObjectSucceeds() {
+ void ExpectPostTaskAndReplyToMockObjectSucceeds(
+ bool task_uses_weak_ptr = false) {
// Expect the post to succeed.
- EXPECT_TRUE(PostTaskAndReplyToMockObject());
+ EXPECT_TRUE(PostTaskAndReplyToMockObject(task_uses_weak_ptr));
- // Expect the first task to be posted to |post_runner_|.
+ // Expect the first task to be posted to `post_runner_`.
EXPECT_TRUE(post_runner_->HasPendingTask());
EXPECT_FALSE(reply_runner_->HasPendingTask());
EXPECT_FALSE(delete_task_flag_);
@@ -149,7 +165,7 @@
EXPECT_TRUE(delete_task_flag_);
EXPECT_FALSE(delete_reply_flag_);
- // Expect the reply to be posted to |reply_runner_|.
+ // Expect the reply to be posted to `reply_runner_`.
EXPECT_FALSE(post_runner_->HasPendingTask());
EXPECT_TRUE(reply_runner_->HasPendingTask());
@@ -160,7 +176,7 @@
// The reply should have been deleted right after being run.
EXPECT_TRUE(delete_reply_flag_);
- // Expect no pending task in |post_runner_| and |reply_runner_|.
+ // Expect no pending task in `post_runner_` and `reply_runner_`.
EXPECT_FALSE(post_runner_->HasPendingTask());
EXPECT_FALSE(reply_runner_->HasPendingTask());
}
@@ -168,15 +184,15 @@
TEST_F(PostTaskAndReplyImplTest, TaskDoesNotRun) {
ExpectPostTaskAndReplyToMockObjectSucceeds();
- // Clear the |post_runner_|. Both callbacks should be scheduled for deletion
- // on the |reply_runner_|.
+ // Clear the `post_runner_`. Both callbacks should be scheduled for deletion
+ // on the `reply_runner_`.
post_runner_->ClearPendingTasksWithRunsTasksInCurrentSequence();
EXPECT_FALSE(post_runner_->HasPendingTask());
EXPECT_TRUE(reply_runner_->HasPendingTask());
EXPECT_FALSE(delete_task_flag_);
EXPECT_FALSE(delete_reply_flag_);
- // Run the |reply_runner_|. Both callbacks should be deleted.
+ // Run the `reply_runner_`. Both callbacks should be deleted.
reply_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
EXPECT_TRUE(delete_task_flag_);
EXPECT_TRUE(delete_reply_flag_);
@@ -192,11 +208,11 @@
EXPECT_TRUE(delete_task_flag_);
EXPECT_FALSE(delete_reply_flag_);
- // Expect the reply to be posted to |reply_runner_|.
+ // Expect the reply to be posted to `reply_runner_`.
EXPECT_FALSE(post_runner_->HasPendingTask());
EXPECT_TRUE(reply_runner_->HasPendingTask());
- // Clear the |reply_runner_| queue without running tasks. The reply callback
+ // Clear the `reply_runner_` queue without running tasks. The reply callback
// should be deleted.
reply_runner_->ClearPendingTasksWithRunsTasksInCurrentSequence();
EXPECT_TRUE(delete_task_flag_);
@@ -218,4 +234,35 @@
EXPECT_TRUE(delete_reply_flag_);
}
+// Demonstrate that even if a task is not run because a weak pointer is
+// invalidated, the reply still runs.
+TEST_F(PostTaskAndReplyImplTest, ReplyStillRunsAfterInvalidatedWeakPtrTask) {
+ ExpectPostTaskAndReplyToMockObjectSucceeds(/*task_uses_weak_ptr=*/true);
+
+ // The task will not run when the provided weak pointer is invalidated.
+ EXPECT_CALL(mock_object_, Task(_)).Times(0);
+ mock_object_.InvalidateWeakPtrs();
+ post_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+ testing::Mock::VerifyAndClear(&mock_object_);
+ // The task should have been deleted as part of dropping the run due to the
+ // invalidated weak pointer.
+ EXPECT_TRUE(delete_task_flag_);
+ EXPECT_FALSE(delete_reply_flag_);
+
+ // Still expect a reply to be posted to `reply_runner_`.
+ EXPECT_FALSE(post_runner_->HasPendingTask());
+ EXPECT_TRUE(reply_runner_->HasPendingTask());
+
+ EXPECT_CALL(mock_object_, Reply(_)).Times(1);
+ reply_runner_->RunUntilIdleWithRunsTasksInCurrentSequence();
+ testing::Mock::VerifyAndClear(&mock_object_);
+ EXPECT_TRUE(delete_task_flag_);
+ // The reply should have been deleted right after being run.
+ EXPECT_TRUE(delete_reply_flag_);
+
+ // Expect no pending task in `post_runner_` and `reply_runner_`.
+ EXPECT_FALSE(post_runner_->HasPendingTask());
+ EXPECT_FALSE(reply_runner_->HasPendingTask());
+}
+
} // namespace base::internal
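The new ReplyStillRunsAfterInvalidatedWeakPtrTask test pins down a contract callers rely on: if the task is bound to a WeakPtr that is invalidated before it runs, the task is skipped but the reply is still posted and run. A minimal usage sketch of that contract, assuming the usual //base headers; Controller, DoWork and PostWork are hypothetical names, not from this commit:

    #include "base/functional/bind.h"
    #include "base/functional/callback.h"
    #include "base/location.h"
    #include "base/memory/scoped_refptr.h"
    #include "base/memory/weak_ptr.h"
    #include "base/task/sequenced_task_runner.h"

    class Controller {
     public:
      void DoWork(int value) {}  // Skipped entirely once the WeakPtr is invalid.
      base::WeakPtr<Controller> GetWeakPtr() {
        return weak_factory_.GetWeakPtr();
      }

     private:
      base::WeakPtrFactory<Controller> weak_factory_{this};
    };

    void PostWork(scoped_refptr<base::SequencedTaskRunner> runner,
                  Controller* controller,
                  base::OnceClosure on_done) {
      runner->PostTaskAndReply(
          FROM_HERE,
          base::BindOnce(&Controller::DoWork, controller->GetWeakPtr(), 42),
          std::move(on_done));  // Runs even if DoWork was skipped.
    }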
diff --git a/base/threading/sequence_bound.h b/base/threading/sequence_bound.h
index cfaa04c..fb7b63d 100644
--- a/base/threading/sequence_bound.h
+++ b/base/threading/sequence_bound.h
@@ -648,7 +648,7 @@
template <typename ReturnType>
using AsyncCallWithBoundArgsBuilder = typename std::conditional<
- std::is_void<ReturnType>::value,
+ std::is_void_v<ReturnType>,
AsyncCallWithBoundArgsBuilderVoid,
AsyncCallWithBoundArgsBuilderDefault<ReturnType>>::type;
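The hunk above only re-spells the trait, but the surrounding std::conditional is the interesting part: it selects one builder type when the call returns void and another otherwise. A generic sketch of that compile-time selection (helper type names are invented):

    #include <type_traits>

    struct VoidBuilder {};
    template <typename R> struct ValueBuilder {};

    // Pick one helper when R is void, the other otherwise.
    template <typename R>
    using BuilderFor =
        std::conditional_t<std::is_void_v<R>, VoidBuilder, ValueBuilder<R>>;

    static_assert(std::is_same_v<BuilderFor<void>, VoidBuilder>);
    static_assert(std::is_same_v<BuilderFor<int>, ValueBuilder<int>>);

    int main() {}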
diff --git a/base/threading/thread_local_storage.cc b/base/threading/thread_local_storage.cc
index 457b986..8dc21cd 100644
--- a/base/threading/thread_local_storage.cc
+++ b/base/threading/thread_local_storage.cc
@@ -234,7 +234,7 @@
// typical Chromium builds where the code is in a dynamic library. For the
// static executable case, this is likely equivalent.
static_assert(
- std::is_same<PlatformThreadLocalStorage::TLSKey, pthread_key_t>::value,
+ std::is_same_v<PlatformThreadLocalStorage::TLSKey, pthread_key_t>,
"The special-case below assumes that the platform TLS implementation is "
"pthread.");
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 0bf24c5..b573535 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -461,9 +461,6 @@
}
#if BUILDFLAG(USE_PARTITION_ALLOC)
-// static
-const char* MemoryDumpPartitionStatsDumper::kPartitionsDumpName = "partitions";
-
std::string GetPartitionDumpName(const char* root_name,
const char* partition_name) {
return base::StringPrintf("%s/%s/%s", root_name,
diff --git a/base/trace_event/malloc_dump_provider.h b/base/trace_event/malloc_dump_provider.h
index 4a758e6..abc2f94 100644
--- a/base/trace_event/malloc_dump_provider.h
+++ b/base/trace_event/malloc_dump_provider.h
@@ -83,7 +83,7 @@
ProcessMemoryDump* memory_dump,
MemoryDumpLevelOfDetail level_of_detail);
- static const char* kPartitionsDumpName;
+ static constexpr char kPartitionsDumpName[] = "partitions";
// PartitionStatsDumper implementation.
void PartitionDumpTotals(
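Making kPartitionsDumpName a static constexpr array is what allows the out-of-line definition in malloc_dump_provider.cc to be deleted: since C++17, static constexpr data members are implicitly inline, so the in-class initializer is the only definition needed. A generic sketch of the idiom (class name is made up):

    // Before: `static const char* kName;` in the header plus
    // `const char* Dumper::kName = "partitions";` in exactly one .cc file.
    struct Dumper {
      static constexpr char kName[] = "partitions";  // Implicitly inline (C++17).
    };

    int main() { return Dumper::kName[0] == 'p' ? 0 : 1; }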
diff --git a/base/trace_event/trace_arguments.cc b/base/trace_event/trace_arguments.cc
index ad202ec..01a6589 100644
--- a/base/trace_event/trace_arguments.cc
+++ b/base/trace_event/trace_arguments.cc
@@ -16,6 +16,7 @@
#include "base/json/string_escape.h"
#include "base/memory/raw_ptr.h"
#include "base/notreached.h"
+#include "base/strings/strcat.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
@@ -196,10 +197,10 @@
// So as not to lose bits from a 64-bit pointer, output as a hex string.
// For consistency, do the same for non-JSON strings, but without the
// surrounding quotes.
- const char* format_string = as_json ? "\"0x%" PRIx64 "\"" : "0x%" PRIx64;
- StringAppendF(
- out, format_string,
+ const std::string value = StringPrintf(
+ "0x%" PRIx64,
static_cast<uint64_t>(reinterpret_cast<uintptr_t>(this->as_pointer)));
+ *out += as_json ? StrCat({"\"", value, "\""}) : value;
} break;
case TRACE_VALUE_TYPE_STRING:
case TRACE_VALUE_TYPE_COPY_STRING:
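The rewritten pointer-formatting branch no longer picks between two printf-style format strings at runtime; it formats the hex value once with a literal format and appends the JSON quotes by concatenation. A generic sketch of the same pattern using only the standard library (function name is made up):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    std::string FormatPointer(uint64_t bits, bool as_json) {
      char buf[32];
      std::snprintf(buf, sizeof(buf), "0x%" PRIx64, bits);  // Literal format only.
      std::string value = buf;
      return as_json ? "\"" + value + "\"" : value;
    }

    int main() {
      return FormatPointer(0xdeadbeef, /*as_json=*/true) == "\"0xdeadbeef\""
                 ? 0
                 : 1;
    }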
diff --git a/base/tracing/protos/chrome_track_event.proto b/base/tracing/protos/chrome_track_event.proto
index c3c1888..4b6a954 100644
--- a/base/tracing/protos/chrome_track_event.proto
+++ b/base/tracing/protos/chrome_track_event.proto
@@ -1427,7 +1427,9 @@
message ScrollPredictorMetrics {
message EventFrameValue {
optional int64 event_trace_id = 1;
- optional float delta_value = 2;
+ // The number of pixels the page was scrolled by this frame (may be
+ // fractional after the predictor adjusts during input resampling).
+ optional float delta_value_pixels = 2;
};
// Data from the previous, current, and next frame used to determine the
// values below as according to the metric doc:
@@ -1438,7 +1440,7 @@
// This is the amount of delta processed in this frame that was above the
// janky threshold (as defined by
// http://doc/1Y0u0Tq5eUZff75nYUzQVw6JxmbZAW9m64pJidmnGWsY)
- optional float janky_value = 4;
+ optional float janky_value_pixels = 4;
// True if we are also missing frames (so multiple frames are being presented
// at once).
optional bool has_missed_vsyncs = 5;
diff --git a/mojo/public/tools/bindings/mojom.gni b/mojo/public/tools/bindings/mojom.gni
index c0667a7..bebb753 100644
--- a/mojo/public/tools/bindings/mojom.gni
+++ b/mojo/public/tools/bindings/mojom.gni
@@ -2147,6 +2147,11 @@
args += message_scrambling_args
}
+ if (defined(invoker.js_generate_struct_deserializers) &&
+ invoker.js_generate_struct_deserializers) {
+ args += [ "--js_generate_struct_deserializers" ]
+ }
+
# TODO(crbug.com/1007587): Support scramble_message_ids if above is
# insufficient.
# TODO(crbug.com/1007591): Support generate_fuzzing.
diff --git a/mojo/public/tools/mojom/check_stable_mojom_compatibility.py b/mojo/public/tools/mojom/check_stable_mojom_compatibility.py
index 8b694e0..77c3008 100755
--- a/mojo/public/tools/mojom/check_stable_mojom_compatibility.py
+++ b/mojo/public/tools/mojom/check_stable_mojom_compatibility.py
@@ -80,7 +80,7 @@
# (at the moment) since they may not exist in the output directory.
generated_files_to_skip = {
('third_party/blink/public/mojom/runtime_feature_state/'
- 'runtime_feature_state.mojom'),
+ 'runtime_feature.mojom'),
('third_party/blink/public/mojom/origin_trial_feature/'
'origin_trial_feature.mojom'),
}